Example #1
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
    pt_entry_t *fpte, *tpte, *mafpte, *matpte;
    pt_entry_t ofpte, otpte;
#ifdef MULTIPROCESSOR
    u_int32_t cpumask = 0;
#endif

#ifdef DIAGNOSTIC
    if ((size & PAGE_MASK) != 0)
        panic("pagemove");
#endif
    fpte = kvtopte((vaddr_t)from);
    tpte = kvtopte((vaddr_t)to);
    while (size > 0) {
        mafpte = (pt_entry_t *)vtomach((vaddr_t)fpte);
        matpte = (pt_entry_t *)vtomach((vaddr_t)tpte);
        otpte = pte_atomic_update(tpte, matpte, *fpte);
        ofpte = pte_atomic_update(fpte, mafpte, 0);
        tpte++;
        fpte++;
#if defined(I386_CPU) && !defined(MULTIPROCESSOR)
        if (cpu_class != CPUCLASS_386)
#endif
        {
            if (otpte & PG_V)
#ifdef MULTIPROCESSOR
                pmap_tlb_shootdown(pmap_kernel(), (vaddr_t)to,
                                   otpte, &cpumask);
#else
                pmap_update_pg((vaddr_t)to);
#endif
            if (ofpte & PG_V)
#ifdef MULTIPROCESSOR
                pmap_tlb_shootdown(pmap_kernel(),
                                   (vaddr_t)from, ofpte, &cpumask);
#else
                pmap_update_pg((vaddr_t)from);
#endif
        }

        from += PAGE_SIZE;
        to += PAGE_SIZE;
        size -= PAGE_SIZE;
    }
#ifdef MULTIPROCESSOR
    pmap_tlb_shootnow(cpumask);
#else
#if defined(I386_CPU)
    if (cpu_class == CPUCLASS_386)
        tlbflush();
#endif
#endif
}
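All of these examples lean on kvtopte(), which resolves a kernel virtual
address to its page-table entry by indexing the kernel page-table array
(the Sysmap on most ports) with the virtual page number. A minimal,
runnable userland sketch of that arithmetic, assuming 4 KiB pages and an
illustrative kernel VA base (both constants are stand-ins, not values
from any port):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PGSHIFT	12		/* assumed 4 KiB pages */
#define DEMO_KVA_BASE	0xc0000000u	/* assumed kernel VA base */

typedef uint32_t demo_pte_t;
static demo_pte_t demo_sysmap[1024];	/* stand-in for the Sysmap */

/* The computation kvtopte() performs: VA -> &Sysmap[virtual page no.] */
static demo_pte_t *
demo_kvtopte(uint32_t va)
{
	return &demo_sysmap[(va - DEMO_KVA_BASE) >> DEMO_PGSHIFT];
}

int
main(void)
{
	printf("PTE index for 0xc0003000: %ld\n",
	    (long)(demo_kvtopte(0xc0003000u) - demo_sysmap));	/* 3 */
	return 0;
}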
Example #2
/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ni_add_rxbuf(struct ni_softc *sc, struct ni_dg *data, int idx)
{
	struct ni_bbd *bd = &bbd[idx];
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_data += 2;
	bd->nb_len = (m->m_ext.ext_size - 2);
	bd->nb_pte = (long)kvtopte(m->m_ext.ext_buf);
	bd->nb_status = 2 | NIBD_VALID;
	bd->nb_key = 1;

	data->bufs[0]._offset = 0;
	data->bufs[0]._len = bd->nb_len;
	data->bufs[0]._index = idx;
	data->nd_cmdref = (long)m;

	return (0);
}
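For context, a driver normally calls ni_add_rxbuf() once per descriptor
to populate the receive ring at attach time, and again from the interrupt
handler to replace a buffer it has passed up the stack. A hedged sketch
of the attach-time fill; NI_NRECV and rx_datagrams are illustrative
names, not taken from the driver source:

/* Hypothetical attach-time receive-ring fill. */
static int
ni_fill_rxring(struct ni_softc *sc)
{
	int i;

	for (i = 0; i < NI_NRECV; i++)
		if (ni_add_rxbuf(sc, &rx_datagrams[i], i) != 0)
			return (ENOBUFS);	/* out of mbufs/clusters */
	return (0);
}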
Example #3
/*
 * Load appropriate gdt descriptor; we better be running on *ci
 * (for the most part, this is how a CPU knows who it is).
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
#ifndef XEN
    struct region_descriptor region;
    size_t max_len;

    max_len = MAXGDTSIZ * sizeof(gdt[0]);
    setregion(&region, ci->ci_gdt, max_len - 1);
    lgdt(&region);
#else
    size_t len = gdt_size[0] * sizeof(gdt[0]);
    unsigned long frames[len >> PAGE_SHIFT];
    vaddr_t va;
    pt_entry_t *ptp;
    int f;

    for (va = (vaddr_t)ci->ci_gdt, f = 0;
            va < (vaddr_t)ci->ci_gdt + len;
            va += PAGE_SIZE, f++) {
        KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
        ptp = kvtopte(va);
        frames[f] = *ptp >> PAGE_SHIFT;
        pmap_pte_clearbits(ptp, PG_RW);
    }
    /* printk("loading gdt %x, %d entries, %d pages", */
    /* frames[0] << PAGE_SHIFT, gdt_size[0], len >> PAGE_SHIFT); */
    if (HYPERVISOR_set_gdt(frames, gdt_size[0]))
        panic("HYPERVISOR_set_gdt failed!\n");
    lgdt_finish();
#endif
}
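The Xen path hands the hypervisor machine frame numbers rather than
virtual addresses, which is why each PTE is shifted right by PAGE_SHIFT:
the low bits of a PTE hold flag bits, the rest the frame number. A
runnable sketch of that split, assuming an i386-style layout with twelve
low flag bits (all constants illustrative):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PG_V	0x001		/* valid */
#define DEMO_PG_RW	0x002		/* writable */

int
main(void)
{
	uint32_t pte = 0x00123000u | DEMO_PG_V | DEMO_PG_RW;

	printf("frame %#x, flags %#x\n",
	    (unsigned)(pte >> DEMO_PAGE_SHIFT),			/* 0x123 */
	    (unsigned)(pte & ((1u << DEMO_PAGE_SHIFT) - 1)));	/* 0x3 */
	return 0;
}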
Example #4
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}
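xen_set_ldt() must revoke write access on every page backing the LDT
before the hypervisor will accept it. With the 8-byte descriptors of the
amd64 branch above and 4 KiB pages, 512 descriptors fit per page; a
runnable sketch of how many pages the loop visits (sizes assumed):

#include <stdio.h>

#define DEMO_PAGE_SIZE	4096

int
main(void)
{
	unsigned int base = 0, entries = 1024;
	unsigned int end = base + entries * 8;	/* 8-byte descriptors */
	unsigned int va, pages = 0;

	for (va = base; va < end; va += DEMO_PAGE_SIZE)
		pages++;
	printf("%u pages\n", pages);		/* prints: 2 pages */
	return 0;
}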
Example #5
void
save_u_area(struct proc *p, vaddr_t va)
{
	int i;

	for (i = 0; i < UPAGES; i++) {
		p->p_md.md_upte[i] = *((pt_entry_t *)kvtopte(va));
		va += NBPG;
	}
}
Example #6
void
physunaccess(void *vaddr, int size)
{
	pt_entry_t *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*pte++ = PG_NV;
	TBIAS();
}
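physunaccess() converts the byte count with btoc() ("bytes to clicks",
i.e. pages, rounding up) and stores one invalid PTE per page before
flushing the TLB. A runnable sketch of that rounding, assuming 4 KiB
pages:

#include <stdio.h>

#define DEMO_NBPG	4096		/* assumed page size */

/* bytes-to-clicks, rounding up, as btoc() does */
static unsigned int
demo_btoc(unsigned int size)
{
	return (size + DEMO_NBPG - 1) / DEMO_NBPG;
}

int
main(void)
{
	printf("%u %u %u\n",
	    demo_btoc(1), demo_btoc(4096), demo_btoc(4097));	/* 1 1 2 */
	return 0;
}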
Example #7
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb;
	struct frame *f;
	pt_entry_t *pte;
	int i, x, y;

	l2->l_md.md_ss_addr = 0;
	l2->l_md.md_ss_instr = 0;
	l2->l_md.md_astpending = 0;

#ifdef DIAGNOSTIC
	/*
	 * If l1 != curlwp && l1 == &lwp0, we're creating a kernel thread.
	 */
	if (l1 != curlwp && l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif

	/*
	 * Copy the pcb from lwp l1 to l2.
	 * Copy l1's trapframe atop l2's stack space, so the return to
	 * user mode will be to the right address, with correct registers.
	 */
	memcpy(&l2->l_addr->u_pcb, &l1->l_addr->u_pcb, sizeof(struct pcb));
	f = (struct frame *)((char *)l2->l_addr + USPACE) - 1;
	memcpy(f, l1->l_md.md_regs, sizeof(struct frame));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		f->f_regs[_R_SP] = (uintptr_t)stack + stacksize;

	l2->l_md.md_regs = (void *)f;
	l2->l_md.md_flags = l1->l_md.md_flags;

	/* XXXAVR32 As MIPS_HAS_R4KMMU */
	x = AVR32_PG_ACCESS;
	y = (AVR32_PG_GLOBAL | AVR32_PG_ACCESS_RW | AVR32_PTE_WIRED | AVR32_PTE_VALID); 
	pte = kvtopte(l2->l_addr);
	for (i = 0; i < UPAGES; i++)
		l2->l_md.md_upte[i] = (pte[i].pt_entry &~ x) | y;

	pcb = &l2->l_addr->u_pcb;
	pcb->pcb_context[10] = (intptr_t)func;		/* XXXAVR32 */
	pcb->pcb_context[9] = (intptr_t)arg;		/* XXXAVR32 */
	pcb->pcb_context[8] = (intptr_t)l2;		/* XXXAVR32 */
	pcb->pcb_context[2] = (intptr_t)f;		/* SP */
	pcb->pcb_context[1] = (intptr_t)lwp_trampoline;/* LR */

}
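The md_upte cache lets the TLB-refill path install the u-area mappings
without walking page tables: each stored entry is the live PTE with the
access bits (x) stripped and the global/RW/wired/valid bits (y) forced
on. A runnable sketch of that bit surgery, with illustrative bit values:

#include <stdio.h>
#include <stdint.h>

#define DEMO_CLEARED	0x0000000fu	/* stands in for x */
#define DEMO_FORCED	0xf0000000u	/* stands in for y */

int
main(void)
{
	uint32_t pte = 0x12345678u;
	uint32_t upte = (pte & ~DEMO_CLEARED) | DEMO_FORCED;

	printf("%#x -> %#x\n", (unsigned)pte, (unsigned)upte);
	/* prints: 0x12345678 -> 0xf2345670 */
	return 0;
}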
Example #8
/*
 * Write bytes to kernel address space for debugger.
 */
void
db_write_bytes(vaddr_t addr, size_t size, char *data)
{
	char	*dst;

	pt_entry_t *ptep0 = 0;
	pt_entry_t	oldmap0 = { 0 };
	vaddr_t	addr1;
	pt_entry_t *ptep1 = 0;
	pt_entry_t	oldmap1 = { 0 };
	extern char	etext;

	if (addr >= VM_MIN_KERNEL_ADDRESS &&
	    addr < (vaddr_t)&etext) {
		ptep0 = kvtopte(addr);
		oldmap0 = *ptep0;
		*(int *)ptep0 |= /* INTEL_PTE_WRITE */ PG_RW;

		addr1 = trunc_page(addr + size - 1);
		if (trunc_page(addr) != addr1) {
			/* data crosses a page boundary */
			ptep1 = kvtopte(addr1);
			oldmap1 = *ptep1;
			*(int *)ptep1 |= /* INTEL_PTE_WRITE */ PG_RW;
		}
		tlbflush();
	}

	dst = (char *)addr;

	while (size-- > 0)
		*dst++ = *data++;

	if (ptep0) {
		*ptep0 = oldmap0;
		if (ptep1)
			*ptep1 = oldmap1;
		tlbflush();
	}
}
Example #9
/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
void
physaccess(void *vaddr, void *paddr, int size, int prot)
{
	pt_entry_t *pte;
	u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*pte++ = PG_V | prot | page;
		page += PAGE_SIZE;
	}
	TBIAS();
}
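A hedged usage sketch of the physaccess()/physunaccess() pair: map one
page of device registers cache-inhibited, then tear the mapping down.
The physical address and the reserved kernel VA `regva' are hypothetical:

	physaccess(regva, (void *)0xfff40000, PAGE_SIZE, PG_RW | PG_CI);
	/* ... access the device registers through regva ... */
	physunaccess(regva, PAGE_SIZE);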
Example #10
void
load_u_area(struct proc *p)
{
	int i;
	vaddr_t va;
	pt_entry_t *t;

	for (i = 0, va = UADDR; i < UPAGES; i++) {
		t = kvtopte(va);
		*t = p->p_md.md_upte[i];
		va += NBPG;
	}
	cmmu_flush_tlb(cpu_number(), 1, UADDR, USPACE);
}
Example #11
static void
update_descriptor(union descriptor *table, union descriptor *entry)
{
#ifndef XEN
    *table = *entry;
#else
    paddr_t pa;
    pt_entry_t *ptp;

    ptp = kvtopte((vaddr_t)table);
    pa = (*ptp & PG_FRAME) | ((vaddr_t)table & ~PG_FRAME);
    if (HYPERVISOR_update_descriptor(pa, entry->raw[0], entry->raw[1]))
        panic("HYPERVISOR_update_descriptor failed\n");
#endif
}
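The hypervisor wants the machine address of the descriptor slot, so
update_descriptor() splices the frame bits of the PTE together with the
page offset of the VA. A runnable sketch of that composition, assuming
the i386-style PG_FRAME mask of 0xfffff000:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PG_FRAME	0xfffff000u

int
main(void)
{
	uint32_t pte = 0x00456007u;	/* frame 0x456 plus flag bits */
	uint32_t va  = 0xc0123abcu;
	uint32_t pa  = (pte & DEMO_PG_FRAME) | (va & ~DEMO_PG_FRAME);

	printf("pa = %#x\n", (unsigned)pa);	/* prints: pa = 0x456abc */
	return 0;
}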
Example #12
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	pt_entry_t *fpte, *tpte, ofpte, otpte;
	int32_t cpumask = 0;

#ifdef DIAGNOSTIC
	if ((size & PAGE_MASK) != 0)
		panic("pagemove");
#endif
	fpte = kvtopte((vaddr_t)from);
	tpte = kvtopte((vaddr_t)to);
#ifdef LARGEPAGES
	/* XXX For now... */
	if (*fpte & PG_PS)
		panic("pagemove: fpte PG_PS");
	if (*tpte & PG_PS)
		panic("pagemove: tpte PG_PS");
#endif
	while (size > 0) {
		otpte = *tpte;
		ofpte = *fpte;
		*tpte++ = *fpte;
		*fpte++ = 0;
		if (otpte & PG_V)
			pmap_tlb_shootdown(pmap_kernel(),
			    (vaddr_t)to, otpte, &cpumask);
		if (ofpte & PG_V)
			pmap_tlb_shootdown(pmap_kernel(),
			    (vaddr_t)from, ofpte, &cpumask);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_tlb_shootnow(cpumask);
}
Example #13
int
kgdb_acc(vaddr_t va, size_t len)
{
	vaddr_t last_va;
	pt_entry_t *pte;

	last_va = va + len;
	va &= ~PGOFSET;
	last_va &= ~PGOFSET;

	do {
		pte = kvtopte(va);
		if ((*pte & PG_V) == 0)
			return (0);
		va += NBPG;
	} while (va < last_va);

	return (1);
}
Example #14
/*
 * Determine if the memory at va..(va+len) is valid.
 */
int
kgdb_acc(vaddr_t va, size_t len)
{
	vaddr_t last_va;
	pt_entry_t *pte;

	last_va = va + len;
	va  &= ~PGOFSET;
	last_va &= ~PGOFSET;

	do {
		if (va < VM_MIN_KERNEL_ADDRESS)
			pte = vtopte(va);
		else
			pte = kvtopte(va);
		if ((*pte & PG_V) == 0)
			return (0);
		va += PAGE_SIZE;
	} while (va < last_va);

	return (1);
}
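Both kgdb_acc() variants guard the debugger against faulting on its own
memory accesses. A hedged sketch of the usual call pattern; the
surrounding copy step is illustrative:

	/* Hypothetical caller: validate before copying on gdb's behalf. */
	if (!kgdb_acc(addr, len))
		return (EFAULT);	/* report unmapped address to gdb */
	memcpy(buf, (void *)addr, len);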
Example #15
/*
 * Map a (kernel) virtual address to a physical address.
 *
 * MIPS processor has 3 distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
paddr_t
kvtophys(vaddr_t kva)
{
	pt_entry_t *pte;
	paddr_t phys;

	if (kva >= VM_MIN_KERNEL_ADDRESS) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pte = kvtopte(kva);
		if ((size_t) (pte - Sysmap) >= Sysmapsize)  {
			printf("oops: Sysmap overrun, max %d index %zd\n",
			       Sysmapsize, pte - Sysmap);
		}
		if (!mips_pg_v(pte->pt_entry)) {
			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
			    kva);
		}
		phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET);
		return phys;
	}
	if (MIPS_KSEG1_P(kva))
		return MIPS_KSEG1_TO_PHYS(kva);

	if (MIPS_KSEG0_P(kva))
		return MIPS_KSEG0_TO_PHYS(kva);
#ifdef _LP64
	if (MIPS_XKPHYS_P(kva))
		return MIPS_XKPHYS_TO_PHYS(kva);
#endif
overrun:
	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}
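For the unmapped MIPS segments kvtophys() needs no PTE at all: kseg0 and
kseg1 both window the low 512 MB of physical memory at fixed
architectural bases (0x80000000 cached, 0xa0000000 uncached), so the
translation is a subtraction. A runnable sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define DEMO_KSEG0_BASE	0x80000000u	/* cached window */
#define DEMO_KSEG1_BASE	0xa0000000u	/* uncached window */
#define DEMO_KSEG2_BASE	0xc0000000u	/* TLB-mapped from here up */

int
main(void)
{
	uint32_t kva = 0xa0001000u;	/* a kseg1 address */

	if (kva >= DEMO_KSEG1_BASE && kva < DEMO_KSEG2_BASE)
		printf("phys %#x\n", (unsigned)(kva - DEMO_KSEG1_BASE));
	else if (kva >= DEMO_KSEG0_BASE && kva < DEMO_KSEG1_BASE)
		printf("phys %#x\n", (unsigned)(kva - DEMO_KSEG0_BASE));
	/* prints: phys 0x1000 */
	return 0;
}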
Example #16
BOOLEAN
acpi_md_OsReadable(void *Pointer, uint32_t Length)
{
	BOOLEAN rv = TRUE;
	vaddr_t sva, eva;
	pt_entry_t *pte;

	sva = trunc_page((vaddr_t) Pointer);
	eva = round_page((vaddr_t) Pointer + Length);

	if (sva < VM_MIN_KERNEL_ADDRESS)
		return FALSE;

	for (; sva < eva; sva += PAGE_SIZE) {
		pte = kvtopte(sva);
		if ((*pte & PG_V) == 0) {
			rv = FALSE;
			break;
		}
	}

	return rv;
}
Example #17
BOOLEAN
acpi_md_OsWritable(void *Pointer, UINT32 Length)
{
	BOOLEAN rv = TRUE;
	vaddr_t sva, eva;
	pt_entry_t *pte;

	sva = trunc_page((vaddr_t) Pointer);
	eva = round_page((vaddr_t) Pointer + Length);

	if (sva < VM_MIN_KERNEL_ADDRESS)
		return (FALSE);

	for (; sva < eva; sva += PAGE_SIZE) {
		pte = kvtopte(sva);
		if ((*pte & (PG_V|PG_W)) != (PG_V|PG_W)) {
			rv = FALSE;
			break;
		}
	}

	return (rv);
}
Example #18
/*
 * Load appropriate gdt descriptor; we better be running on *ci
 * (for the most part, this is how a cpu knows who it is).
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
	size_t len = gdt_size[0] * sizeof(gdt[0]);
	unsigned long frames[len >> PAGE_SHIFT];
	vaddr_t va;
	pt_entry_t *ptp;
	pt_entry_t *maptp;
	int f;

	for (va = (vaddr_t)ci->ci_gdt, f = 0;
	     va < (vaddr_t)ci->ci_gdt + len;
	     va += PAGE_SIZE, f++) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		frames[f] = *ptp >> PAGE_SHIFT;
		maptp = (pt_entry_t *)vtomach((vaddr_t)ptp);
		PTE_CLEARBITS(ptp, maptp, PG_RW);
	}
	PTE_UPDATES_FLUSH();
	if (HYPERVISOR_set_gdt(frames, gdt_size[0]))
		panic("HYPERVISOR_set_gdt failed!\n");
	lgdt_finish();
}
Example #19
void
niintr(void *arg)
{
	struct ni_softc *sc = arg;
	struct ni_dg *data;
	struct ni_msg *msg;
	struct ifnet *ifp = &sc->sc_if;
	struct ni_bbd *bd;
	struct mbuf *m;
	int idx, res;

	if ((NI_RREG(NI_PSR) & PSR_STATE) != PSR_ENABLED)
		return;

	if ((NI_RREG(NI_PSR) & PSR_ERR))
		printf("%s: PSR %x\n", device_xname(sc->sc_dev), NI_RREG(NI_PSR));

	KERNEL_LOCK(1, NULL);
	/* Got any response packets?  */
	while ((NI_RREG(NI_PSR) & PSR_RSQ) && (data = REMQHI(&gvp->nc_forwr))) {

		switch (data->nd_opcode) {
		case BVP_DGRAMRX: /* Receive datagram */
			idx = data->bufs[0]._index;
			bd = &bbd[idx];
			m = (void *)data->nd_cmdref;
			m->m_pkthdr.len = m->m_len =
			    data->bufs[0]._len - ETHER_CRC_LEN;
			m->m_pkthdr.rcvif = ifp;
			if (ni_add_rxbuf(sc, data, idx)) {
				bd->nb_len = (m->m_ext.ext_size - 2);
				bd->nb_pte =
				    (long)kvtopte(m->m_ext.ext_buf);
				bd->nb_status = 2 | NIBD_VALID;
				bd->nb_key = 1;
			}
			data->nd_len = RXADD;
			data->nd_status = 0;
			res = INSQTI(data, &fqb->nf_rforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_RFREEQ|PCR_OWN);
			}
			if (m == (void *)data->nd_cmdref)
				break; /* Out of mbufs */

			bpf_mtap(ifp, m);
			(*ifp->if_input)(ifp, m);
			break;

		case BVP_DGRAM:
			m = (struct mbuf *)data->nd_cmdref;
			ifp->if_flags &= ~IFF_OACTIVE;
			m_freem(m);
			res = INSQTI(data, &fqb->nf_dforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_DFREEQ|PCR_OWN);
			}
			break;

		case BVP_MSGRX:
			msg = (struct ni_msg *)data;
			switch (msg->nm_opcode2) {
				case NI_WPARAM:
					memcpy(sc->sc_enaddr,
					    ((struct ni_param *)&msg->nm_text[0])->np_dpa,
					    ETHER_ADDR_LEN);
					endwait = 1;
					break;

				case NI_RCCNTR:
				case NI_CLPTDB:
				case NI_STPTDB:
					break;

				default:
					printf("Unkn resp %d\n",
					    msg->nm_opcode2);
					break;
			}
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
			break;

		default:
			printf("Unknown opcode %d\n", data->nd_opcode);
			res = INSQTI(data, &fqb->nf_mforw);
			if (res == Q_EMPTY) {
				WAITREG(NI_PCR, PCR_OWN);
				NI_WREG(NI_PCR, PCR_FREEQNE|PCR_MFREEQ|PCR_OWN);
			}
		}
	}

	/* Try to kick on the start routine again */
	nistart(ifp);

	NI_WREG(NI_PSR, NI_RREG(NI_PSR) & ~(PSR_OWN|PSR_RSQ));
	KERNEL_UNLOCK_ONE(NULL);
}
Example #20
/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it will start out in lwp_trampoline()
 * and call child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);

	l2->l_md.md_ss_addr = 0;
	l2->l_md.md_ss_instr = 0;
	l2->l_md.md_astpending = 0;

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from the parent, so that the return to
	 * userspace will be to the right address, with correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);
	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;

	/* If specified, set a different user stack for a child. */
	if (stack != NULL)
		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;

	l2->l_md.md_utf = tf;
#if USPACE > PAGE_SIZE
	bool direct_mapped_p = MIPS_KSEG0_P(ua2);
#ifdef _LP64
	direct_mapped_p = direct_mapped_p || MIPS_XKPHYS_P(ua2);
#endif
	if (!direct_mapped_p) {
		pt_entry_t * const pte = kvtopte(ua2);
		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;

		for (u_int i = 0; i < UPAGES; i++) {
			l2->l_md.md_upte[i] = pte[i].pt_entry &~ x;
		}
	}
#endif
	/*
	 * Rig kernel stack so that it would start out in lwp_trampoline()
	 * and call child_return() with l as an argument.  This causes the
	 * newly-created child process to go directly to user level with a
	 * parent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */

	pcb2->pcb_context.val[_L_S0] = (intptr_t)func;			/* S0 */
	pcb2->pcb_context.val[_L_S1] = (intptr_t)arg;			/* S1 */
	pcb2->pcb_context.val[MIPS_CURLWP_LABEL] = (intptr_t)l2;	/* T8 */
	pcb2->pcb_context.val[_L_SP] = (intptr_t)tf;			/* SP */
	pcb2->pcb_context.val[_L_RA] =
	   mips_locore_jumpvec.ljv_lwp_trampoline;			/* RA */
#ifdef _LP64
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_KX);
#endif
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_INT_IE);
}
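The trapframe is carved out of the very top of the child's u-area:
`(struct trapframe *)(ua2 + USPACE) - 1' points exactly one frame below
the stack top, and the kernel stack then grows down from beneath it. A
runnable userland sketch of the pointer arithmetic, with an assumed
u-area size and register count:

#include <stdio.h>
#include <stdint.h>

#define DEMO_USPACE	16384		/* assumed u-area size */

struct demo_frame {
	uint32_t regs[38];		/* assumed register count */
};

int
main(void)
{
	static char uarea[DEMO_USPACE];
	struct demo_frame *tf =
	    (struct demo_frame *)(uarea + DEMO_USPACE) - 1;

	printf("frame occupies the top %zu bytes of the u-area\n",
	    (size_t)((uarea + DEMO_USPACE) - (char *)tf));	/* 152 */
	return 0;
}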
Example #21
static void
db_write_text(db_addr_t addr, size_t size, const char *data)
{
	char *dst, *odst;
	pt_entry_t *pte, oldpte, tmppte;
	vaddr_t pgva;
	int limit;

	dst = (char *)addr;
	while (size > 0) {

		/*
		 * Get the VA for the page.
		 */
		pgva = trunc_page((vaddr_t)dst);

		/*
		 * Save this destination address, for TLB flush.
		 */
		odst = dst;

		/*
		 * Compute number of bytes that can be written
		 * with this mapping and subtract it from the total size.
		 */
		limit = round_page((vaddr_t)dst + 1) - (vaddr_t)dst;
		if (limit > size)
			limit = size;
		size -= limit;

#ifdef M68K_MMU_HP
		/*
		 * Flush the supervisor side of the VAC to
		 * prevent a cache hit on the old, read-only PTE.
		 */
		if (ectype == EC_VIRT)
			DCIS();
#endif

		/*
		 * Make the page writable.  Note the mapping is
		 * cache-inhibited to save hair.
		 */
		pte = kvtopte(pgva);
		oldpte = *pte;
		if ((oldpte & PG_V) == 0) {
			printf(" address %p not a valid page\n", dst);
			return;
		}
		tmppte = (oldpte & ~PG_RO) | PG_RW | PG_CI;
		*pte = tmppte;
		TBIS((vaddr_t)odst);

		/*
		 * Page is now writable.  Do as much access as we can.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Restore the old PTE.
		 */
		*pte = oldpte;
		TBIS((vaddr_t)odst);
	}

	/*
	 * Invalidate the instruction cache so our changes take effect.
	 */
	ICIA();
}
Example #22
/*
 * Trap is called from locore to handle most types of processor traps.
 */
void
trap(unsigned int status, unsigned int cause, vaddr_t vaddr, vaddr_t opc,
	struct trapframe *frame) 
{
	int type;
	struct lwp *l = curlwp;
	struct proc *p = curproc;
	vm_prot_t ftype;
	ksiginfo_t ksi;
	struct frame *fp;
	extern void fswintrberr(void);
	KSI_INIT_TRAP(&ksi);

	uvmexp.traps++;

	if ((type = TRAPTYPE(cause)) >= LENGTH(trap_type))
		panic("trap: unknown trap type %d", type);

	if (USERMODE(status)) {
		type |= T_USER;
		LWP_CACHE_CREDS(l, p);
	}

	/* Enable interrupts just as they were before the trap. */
	_splset(status & AVR32_STATUS_IMx);

	switch (type) {
	default:
	dopanic:
		(void)splhigh();
		printf("trap: %s in %s mode\n",
			trap_type[TRAPTYPE(cause)],
			USERMODE(status) ? "user" : "kernel");
		printf("status=0x%x, cause=0x%x, epc=%#lx, vaddr=%#lx\n",
			status, cause, opc, vaddr);
		if (curlwp != NULL) {
			fp = (struct frame *)l->l_md.md_regs;
			printf("pid=%d cmd=%s usp=0x%x ",
			    p->p_pid, p->p_comm, (int)fp->f_regs[_R_SP]);
		} else
			printf("curlwp == NULL ");
		printf("ksp=%p\n", &status);
#if defined(DDB)
		kdb_trap(type, (mips_reg_t *) frame);
		/* XXX force halt XXX */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern mips_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (mips_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (kgdb_trap(type, &ddb_regs)) {
				((mips_reg_t *)frame)[21] = f->f_regs[_R_PC];
				return;
			}
		}
#else
		panic("trap: dopanic: notyet");
#endif
		/*NOTREACHED*/
	case T_TLB_MOD:
		panic("trap: T_TLB_MOD: notyet");
#if notyet
		if (KERNLAND(vaddr)) {
			pt_entry_t *pte;
			unsigned entry;
			paddr_t pa;

			pte = kvtopte(vaddr);
			entry = pte->pt_entry;
			if (!avr32_pte_v(entry) /*|| (entry & mips_pg_m_bit())*/) {
				panic("ktlbmod: invalid pte");
			}
			if (entry & avr32_pte_ropage_bit()) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernelfault;
			}
			entry |= mips_pg_m_bit();	/* XXXAVR32 Do it on tlbarlo/ tlbarhi? */
			pte->pt_entry = entry;
			vaddr &= ~PGOFSET;
			MachTLBUpdate(vaddr, entry);
			pa = avr32_tlbpfn_to_paddr(entry);
			if (!IS_VM_PHYSADDR(pa)) {
				printf("ktlbmod: va %#lx pa %#llx\n",
				    vaddr, (long long)pa);
				panic("ktlbmod: unmanaged page");
			}
			pmap_set_modified(pa);
			return; /* KERN */
		}
		/*FALLTHROUGH*/
#endif
	case T_TLB_MOD+T_USER: 
		panic("trap: T_TLB_MOD+T_USER: notyet");
#if notyet
	    {
		pt_entry_t *pte;
		unsigned entry;
		paddr_t pa;
		pmap_t pmap;

		pmap  = p->p_vmspace->vm_map.pmap;
		if (!(pte = pmap_segmap(pmap, vaddr)))
			panic("utlbmod: invalid segmap");
		pte += (vaddr >> PGSHIFT) & (NPTEPG - 1);

		entry = pte->pt_entry;
		if (!avr32_pte_v(entry))
			panic("utlbmod: invalid pte");

		if (entry & avr32_pte_ropage_bit()) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto pagefault;
		}
		/* entry |= mips_pg_m_bit();  XXXAVR32 Do it on tlbarlo/ tlbarhi? */
		pte->pt_entry = entry;
		vaddr = (vaddr & ~PGOFSET) |
			(pmap->pm_asid << AVR32_TLB_PID_SHIFT);
		MachTLBUpdate(vaddr, entry);
		pa = avr32_tlbpfn_to_paddr(entry);
		if (!IS_VM_PHYSADDR(pa)) {
			printf("utlbmod: va %#lx pa %#llx\n",
			    vaddr, (long long)pa);
			panic("utlbmod: unmanaged page");
		}
		pmap_set_modified(pa);
		if (type & T_USER)
			userret(l);
		return; /* GEN */
	    }
#endif
	case T_TLB_LD_MISS:
		panic("trap: T_TLB_LD_MISS: notyet");
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_LD_MISS) ? VM_PROT_READ : VM_PROT_WRITE;
		if (KERNLAND(vaddr))
			goto kernelfault;
		panic("trap: T_TLB_ST_MISS: notyet");
#if notyet
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (l == NULL  || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		/* check for fuswintr() or suswintr() getting a page fault */
		if (l->l_addr->u_pcb.pcb_onfault == (void *)fswintrberr) {
			frame->tf_regs[TF_EPC] = (int)fswintrberr;
			return; /* KERN */
		}
		goto pagefault;
#endif
	case T_TLB_LD_MISS+T_USER:
		panic("trap: T_TLB_LD_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_READ;
		goto pagefault;
#endif
	case T_TLB_ST_MISS+T_USER:
		panic("trap: T_TLB_ST_MISS+T_USER: notyet");
#if notyet
		ftype = VM_PROT_WRITE;
#endif
	pagefault: ;
	    {
		vaddr_t va;
		struct vmspace *vm;
		struct vm_map *map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page(vaddr);

		if ((l->l_flag & LW_SA) && (~l->l_pflag & LP_SA_NOBLOCK)) {
			l->l_savp->savp_faultaddr = (vaddr_t)vaddr;
			l->l_pflag |= LP_SA_PAGEFAULT;
		}

		if (p->p_emul->e_fault)
			rv = (*p->p_emul->e_fault)(p, va, ftype);
		else
			rv = uvm_fault(map, va, ftype);
				
#ifdef VMFAULT_TRACE
		printf(
	    "uvm_fault(%p (pmap %p), %lx (0x%x), %d) -> %d at pc %p\n",
		    map, vm->vm_map.pmap, va, vaddr, ftype, rv, (void*)opc);
#endif
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((void *)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		l->l_pflag &= ~LP_SA_PAGEFAULT;
		if (rv == 0) {
			if (type & T_USER) {
				userret(l);
			}
			return; /* GEN */
		}
		if ((type & T_USER) == 0)
			goto copyfault;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: out of swap\n",
			       p->p_pid, p->p_comm,
			       l->l_cred ?
			       kauth_cred_geteuid(l->l_cred) : (uid_t) -1);
			ksi.ksi_signo = SIGKILL;
			ksi.ksi_code = 0;
		} else {
			if (rv == EACCES) {
				ksi.ksi_signo = SIGBUS;
				ksi.ksi_code = BUS_OBJERR;
			} else {
				ksi.ksi_signo = SIGSEGV;
				ksi.ksi_code = SEGV_MAPERR;
			}
		}
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_addr = (void *)vaddr;
		break; /* SIGNAL */
	    }
	kernelfault: ;
	    {
		vaddr_t va;
		int rv;

		va = trunc_page(vaddr);
		rv = uvm_fault(kernel_map, va, ftype);
		if (rv == 0)
			return; /* KERN */
		/*FALLTHROUGH*/
	    }
	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to CPU */
	copyfault:
		panic("trap: copyfault: notyet");
#if notyet
		if (l == NULL || l->l_addr->u_pcb.pcb_onfault == NULL)
			goto dopanic;
		frame->tf_regs[TF_EPC] = (intptr_t)l->l_addr->u_pcb.pcb_onfault;
		return; /* KERN */
#endif
#if notyet
	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to CPU */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to CPU */
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGSEGV; /* XXX */
		ksi.ksi_addr = (void *)vaddr;
		ksi.ksi_code = SEGV_MAPERR; /* XXX */
		break; /* SIGNAL */

	case T_BREAK:
		panic("trap: T_BREAK: notyet");
#if defined(DDB)
		kdb_trap(type, (avr32_reg_t *) frame);
		return;	/* KERN */
#elif defined(KGDB)
		{
			struct frame *f = (struct frame *)&ddb_regs;
			extern avr32_reg_t kgdb_cause, kgdb_vaddr;
			kgdb_cause = cause;
			kgdb_vaddr = vaddr;

			/*
			 * init global ddb_regs, used in db_interface.c routines
			 * shared between ddb and gdb. Send ddb_regs to gdb so
			 * that db_machdep.h macros will work with it, and
			 * allow gdb to alter the PC.
			 */
			db_set_ddb_regs(type, (avr32_reg_t *) frame);
			PC_BREAK_ADVANCE(f);
			if (!kgdb_trap(type, &ddb_regs))
				printf("kgdb: ignored %s\n",
				       trap_type[TRAPTYPE(cause)]);
			else
				((avr32_reg_t *)frame)[21] = f->f_regs[_R_PC];

			return;
		}
#else
		goto dopanic;
#endif
	case T_BREAK+T_USER:
	    {
		vaddr_t va;
		uint32_t instr;
		int rv;

		/* compute address of break instruction */
		va = (DELAYBRANCH(cause)) ? opc + sizeof(int) : opc;

		/* read break instruction */
		instr = fuiword((void *)va);

		if (l->l_md.md_ss_addr != va || instr != MIPS_BREAK_SSTEP) {
			ksi.ksi_trap = type & ~T_USER;
			ksi.ksi_signo = SIGTRAP;
			ksi.ksi_addr = (void *)va;
			ksi.ksi_code = TRAP_TRACE;
			break;
		}
		/*
		 * Restore original instruction and clear BP
		 */
		rv = suiword((void *)va, l->l_md.md_ss_instr);
		if (rv < 0) {
			vaddr_t sa, ea;
			sa = trunc_page(va);
			ea = round_page(va + sizeof(int) - 1);
			rv = uvm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_ALL, false);
			if (rv == 0) {
				rv = suiword((void *)va, l->l_md.md_ss_instr);
				(void)uvm_map_protect(&p->p_vmspace->vm_map,
				sa, ea, VM_PROT_READ|VM_PROT_EXECUTE, false);
			}
		}
		mips_icache_sync_all();		/* XXXJRT -- necessary? */
		mips_dcache_wbinv_all();	/* XXXJRT -- necessary? */

		if (rv < 0)
			printf("Warning: can't restore instruction at 0x%lx: 0x%x\n",
				l->l_md.md_ss_addr, l->l_md.md_ss_instr);
		l->l_md.md_ss_addr = 0;
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_addr = (void *)va;
		ksi.ksi_code = TRAP_BRKPT;
		break; /* SIGNAL */
	    }
	case T_RES_INST+T_USER:
	case T_COP_UNUSABLE+T_USER:
#if !defined(SOFTFLOAT) && !defined(NOFPU)
		if ((cause & MIPS_CR_COP_ERR) == 0x10000000) {
			struct frame *f;

			f = (struct frame *)l->l_md.md_regs;
			savefpregs(fpcurlwp);	  	/* yield FPA */
			loadfpregs(l);          	/* load FPA */
			fpcurlwp = l;
			l->l_md.md_flags |= MDP_FPUSED;
			f->f_regs[_R_SR] |= MIPS_SR_COP_1_BIT;
		} else
#endif
		{
			MachEmulateInst(status, cause, opc, l->l_md.md_regs);
		}
		userret(l);
		return; /* GEN */
	case T_FPE+T_USER:
		panic ("trap: T_FPE+T_USER: notyet");
#if defined(SOFTFLOAT)
		MachEmulateInst(status, cause, opc, l->l_md.md_regs);
#elif !defined(NOFPU)
		MachFPTrap(status, cause, opc, l->l_md.md_regs);
#endif
		userret(l);
		return; /* GEN */
	case T_OVFLOW+T_USER:
	case T_TRAP+T_USER:
		ksi.ksi_trap = type & ~T_USER;
		ksi.ksi_signo = SIGFPE;
		fp = (struct frame *)l->l_md.md_regs;
		ksi.ksi_addr = (void *)fp->f_regs[_R_PC];
		ksi.ksi_code = FPE_FLTOVF; /* XXX */
		break; /* SIGNAL */
#endif
	}
	panic("trap: post-switch: notyet");
#if notyet
	fp = (struct frame *)l->l_md.md_regs;
	fp->f_regs[_R_CAUSE] = cause;
	fp->f_regs[_R_BADVADDR] = vaddr;
	(*p->p_emul->e_trapsignal)(l, &ksi);
	if ((type & T_USER) == 0)
		panic("trapsignal");
	userret(l);
#endif
	return;
}
Example #23
/*
 * Disassemble instruction at 'loc'.  'altfmt' specifies an
 * (optional) alternate format.  Return address of start of
 * next instruction.
 */
db_addr_t
db_disasm(
    db_addr_t	loc,
    bool	altfmt)
{
	int	inst;
	int	size;
	int	short_addr;
	const char *	seg;
	const struct inst *	ip;
	const char *	i_name;
	int	i_size;
	int	i_mode;
	int	regmodrm = 0;
	bool	first;
	int	displ;
	int	prefix;
	int	imm;
	int	imm2;
	int	len;
	struct i_addr	address;

#ifdef _KERNEL
	pt_entry_t *pte, *pde;

	/*
	 * Don't try to disassemble the location if the mapping is invalid.
	 * If we do, we'll fault, and end up debugging the debugger!
	 * in the case of largepages, "pte" is really the pde and "pde" is
	 * really the entry for the pdp itself.
	 */
	if ((vaddr_t)loc >= VM_MIN_KERNEL_ADDRESS)
		pte = kvtopte((vaddr_t)loc);
	else
		pte = vtopte((vaddr_t)loc);
	pde = vtopte((vaddr_t)pte);
	if ((*pde & PG_V) == 0 || (*pte & PG_V) == 0) {
		db_printf("invalid address\n");
		return (loc);
	}
#endif

	get_value_inc(inst, loc, 1, false);
	short_addr = false;
	size = LONG;
	seg = 0;

	/*
	 * Get prefixes
	 */
	prefix = true;
	do {
		switch (inst) {
		    case 0x66:		/* data16 */
			size = WORD;
			break;
		    case 0x67:
			short_addr = true;
			break;
		    case 0x26:
			seg = "%es";
			break;
		    case 0x36:
			seg = "%ss";
			break;
		    case 0x2e:
			seg = "%cs";
			break;
		    case 0x3e:
			seg = "%ds";
			break;
		    case 0x64:
			seg = "%fs";
			break;
		    case 0x65:
			seg = "%gs";
			break;
		    case 0xf0:
			db_printf("lock ");
			break;
		    case 0xf2:
			db_printf("repne ");
			break;
		    case 0xf3:
			db_printf("repe ");	/* XXX repe VS rep */
			break;
		    default:
			prefix = false;
			break;
		}
		if (prefix)
			get_value_inc(inst, loc, 1, false);
	} while (prefix);

	if (inst >= 0xd8 && inst <= 0xdf) {
		loc = db_disasm_esc(loc, inst, short_addr, size, seg);
		db_printf("\n");
		return (loc);
	}

	if (inst == 0x0f) {
		get_value_inc(inst, loc, 1, false);
		ip = db_inst_0f[inst>>4];
		if (ip == 0)
			ip = &db_bad_inst;
		else
			ip = &ip[inst&0xf];
	} else {
Example #24
/*
 * Write bytes somewhere in the kernel text.  Make the text
 * pages writable temporarily.
 */
static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
	pt_entry_t *ppte, pte;
	size_t limit;
	char *dst;

	if (size == 0)
		return;

	dst = (char *)addr;

	do {
		addr = (vaddr_t)dst;
		/*
		 * Get the PTE for the page.
		 */
		ppte = kvtopte(addr);
		pte = *ppte;

		if ((pte & PG_V) == 0) {
			printf(" address %p not a valid page\n", dst);
			return;
		}

		/*
		 * Compute number of bytes that can be written
		 * with this mapping and subtract it from the
		 * total size.
		 */
		if (pte & PG_PS)
			limit = NBPD_L2 - (addr & (NBPD_L2 - 1));
		else
			limit = PAGE_SIZE - (addr & PGOFSET);
		if (limit > size)
			limit = size;
		size -= limit;

		/*
		 * Make the kernel text page writable.
		 */
		pmap_pte_clearbits(ppte, PG_KR);
		pmap_pte_setbits(ppte, PG_KW);
		pmap_update_pg(addr);

		/*
		 * MULTIPROCESSOR: no shootdown required as the PTE continues to
		 * map the same page and other CPUs do not need write access.
		 */

		/*
		 * Page is now writable.  Do as much access as we
		 * can in this page.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Turn the page back to read-only.
		 */
		pmap_pte_clearbits(ppte, PG_KW);
		pmap_pte_setbits(ppte, PG_KR);
		pmap_update_pg(addr);

		/*
		 * MULTIPROCESSOR: no shootdown required as all other CPUs
		 * should be in CPUF_PAUSE state and will not cache the PTE
		 * with the write access set.
		 */
	} while (size != 0);
}
Example #25
/*
 * Write bytes somewhere in the kernel text.  Make the text
 * pages writable temporarily.
 */
static void
db_write_text(vaddr_t addr, size_t size, const char *data)
{
	pt_entry_t *pte, oldpte, tmppte;
	vaddr_t pgva;
	size_t limit;
	char *dst;

	if (size == 0)
		return;

	dst = (char *)addr;

	do {
		/*
		 * Get the PTE for the page.
		 */
		pte = kvtopte(addr);
		oldpte = *pte;

		if ((oldpte & PG_V) == 0) {
			printf(" address %p not a valid page\n", dst);
			return;
		}

		/*
		 * Get the VA for the page.
		 */
		if (oldpte & PG_PS)
			pgva = (vaddr_t)dst & PG_LGFRAME;
		else
			pgva = x86_trunc_page(dst);

		/*
		 * Compute number of bytes that can be written
		 * with this mapping and subtract it from the
		 * total size.
		 */
		if (oldpte & PG_PS)
			limit = NBPD_L2 - ((vaddr_t)dst & (NBPD_L2 - 1));
		else
			limit = PAGE_SIZE - ((vaddr_t)dst & PGOFSET);
		if (limit > size)
			limit = size;
		size -= limit;

		tmppte = (oldpte & ~PG_KR) | PG_KW;
#ifdef XEN
		xpmap_update(pte, tmppte);
#else
		*pte = tmppte;
#endif
		pmap_update_pg(pgva);

		/*
		 * Page is now writable.  Do as much access as we
		 * can in this page.
		 */
		for (; limit > 0; limit--)
			*dst++ = *data++;

		/*
		 * Restore the old PTE.
		 */
#ifdef XEN
		xpmap_update(pte, oldpte);
#else
		*pte = oldpte;
#endif

		pmap_update_pg(pgva);
		
	} while (size != 0);
}
Example #26
/*
 * Handle a single exception.
 */
void
itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
    int type)
{
	int i;
	unsigned ucode = 0;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;
	struct pcb *pcb;

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			pt_entry_t *pte, entry;
			paddr_t pa;
			vm_page_t pg;

			pte = kvtopte(trapframe->badvaddr);
			entry = *pte;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (pmap_is_page_ro(pmap_kernel(),
			    trunc_page(trapframe->badvaddr), entry)) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				pcb = &p->p_addr->u_pcb;
				goto kernel_fault;
			}
			entry |= PG_M;
			*pte = entry;
			KERNEL_LOCK();
			pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET,
			    entry);
			pa = pfn_to_pad(entry);
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL)
				panic("trap: ktlbmod: unmanaged page");
			pmap_set_modify(pg);
			KERNEL_UNLOCK();
			return;
		}
		/* FALLTHROUGH */

	case T_TLB_MOD+T_USER:
	    {
		pt_entry_t *pte, entry;
		paddr_t pa;
		vm_page_t pg;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;

		if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
			panic("trap: utlbmod: invalid segmap");
		pte += uvtopte(trapframe->badvaddr);
		entry = *pte;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (pmap_is_page_ro(pmap,
		    trunc_page(trapframe->badvaddr), entry)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			pcb = &p->p_addr->u_pcb;
			goto fault_common_no_miss;
		}
		entry |= PG_M;
		*pte = entry;
		KERNEL_LOCK();
		pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), 
		    entry);
		pa = pfn_to_pad(entry);
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("trap: utlbmod: unmanaged page");
		pmap_set_modify(pg);
		KERNEL_UNLOCK();
		return;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		pcb = &p->p_addr->u_pcb;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			onfault = pcb->pcb_onfault;
			pcb->pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			pcb->pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (pcb->pcb_onfault != 0) {
			/*
			 * We want to resolve the TLB fault before invoking
			 * pcb_onfault if necessary.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		pcb = &p->p_addr->u_pcb;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
		pcb = &p->p_addr->u_pcb;
fault_common:

#ifdef CPU_R4000
		if (r4000_errata != 0) {
			if (eop_tlb_miss_handler(trapframe, ci, p) != 0)
				return;
		}
#endif

fault_common_no_miss:

#ifdef CPU_R4000
		if (r4000_errata != 0) {
			eop_cleanup(trapframe, p);
		}
#endif

	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = pcb->pcb_onfault;
		pcb->pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, va, 0, ftype);
		pcb->pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0)
			return;
		if (!USERMODE(trapframe->sr)) {
			if (onfault != 0) {
				pcb->pcb_onfault = 0;
				trapframe->pc =  onfault_table[onfault];
				return;
			}
			goto err;
		}

		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
		break;
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trap_frame *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		register_t tpc;
		int numsys, error;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		atomic_add_int(&uvmexp.syscalls, 1);

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc; /* Remember if restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_p->ps_emul->e_sysent;
		numsys = p->p_p->ps_emul->e_nsysent;
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
		case SYS___syscall:
			/*
			 * Code is first argument, followed by actual args.
			 * __syscall provides the code as a quad to maintain
			 * proper alignment of 64-bit arguments on 32-bit
			 * platforms, which doesn't change anything here.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				if (i > 7)
					if ((error = copyin((void *)locr0->sp,
					    &args.i[7], sizeof(register_t))))
						goto bad;
			}
			break;
		default:
			if (code >= numsys)
				callp += p->p_p->ps_emul->e_nosys; /* (illegal) */
			else
				callp += code;

			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}

		rval[0] = 0;
		rval[1] = locr0->v1;

#if defined(DDB) || defined(DEBUG)
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif

		error = mi_syscall(p, code, callp, args.i, rval);

		switch (error) {
		case 0:
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
		bad:
			locr0->v0 = error;
			locr0->a3 = 1;
		}

		mi_syscall_return(p, code, error, rval);

		return;
	    }

	case T_BREAK:
#ifdef DDB
		kdb_trap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    (void *)p->p_md.md_ss_addr,
				    p->p_md.md_ss_instr);
#endif

				/* Restore original instruction and clear BP */
				process_sstep(p, 0);
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    UVM_PROT_NONE, FALSE);
				return;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}
		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;
		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			return;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
						trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			return;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
		 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != CR_COP1_ERR) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		return;

	case T_FPE:
		printf("FPU Trap: PC %lx CR %lx SR %lx\n",
			trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		return;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		pcb = &p->p_addr->u_pcb;
		if ((onfault = pcb->pcb_onfault) != 0) {
			pcb->pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

	default:
	err:
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap", printf);
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    (void *)trapframe->pc, (void *)trapframe->ra,
		    (void *)trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ? trapframe : p->p_md.md_regs);
		kdb_trap(type, trapframe);
#endif
		panic("trap");
	}

#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();
}
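The T_TRAP+T_USER case above fingerprints gcc's integer divide-by-zero
check: a `teq' against register $zero carrying trap code 7. A runnable
sketch of the same instruction-word test, with the encoded word built by
hand for illustration:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* teq $a0, $zero, 7 -- rs=4, rt=0, code=7, function 0x34 */
	uint32_t instr = (4u << 21) | (0u << 16) | (7u << 6) | 0x34;

	if ((instr & 0xfc00003f) == 0x00000034 &&	/* is it teq? */
	    (instr & 0x001fffc0) == ((0u << 16) | (7u << 6)))
		printf("divide-by-zero trap instruction\n");
	return 0;
}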