Example no. 1
0
/*
 * Restore the machine state that was captured into the per-CPU
 * db_regs_t (see fill_ddb_regs_from_tf()) when the kernel debugger
 * is exited: reload the per-trap-level register stack and, if some
 * LWP currently owns the FPU, push the debugger's (possibly
 * user-modified) copy of the FP state back into the hardware.
 */
void
ddb_restore_state(void)	/* (void): empty parens declare an unprototyped function pre-C23 */
{
    extern void restoretstate(int, struct trapstate *);

    /* Reload %tl and the saved trap-state stack for each trap level. */
    restoretstate(DDB_REGS->db_tl, &DDB_REGS->db_ts[0]);
    if (fplwp) {
        /*
         * Copy the debugger's FP snapshot back into the owner's save
         * area, then load it into the FPU registers.
         */
        *fplwp->l_md.md_fpstate = DDB_REGS->db_fpstate;
        loadfpstate(fplwp->l_md.md_fpstate);
    }
}
Example no. 2
0
/*
 * Snapshot the machine state at the time of a debugger trap into the
 * per-CPU db_regs_t so ddb can inspect/modify it: the trap frame, the
 * stack frame %o6 points at, the FPU owner's state (if any), and the
 * per-trap-level register stack.  Undone by ddb_restore_state().
 */
void
fill_ddb_regs_from_tf(struct trapframe64 *tf)
{
    extern int savetstate(struct trapstate *);

#ifdef MULTIPROCESSOR
    /* One save area per CPU so simultaneous debugger entries don't clobber. */
    static db_regs_t ddbregs[CPUSET_MAXNUMCPU];

    curcpu()->ci_ddb_regs = &ddbregs[cpu_number()];
#else
    static db_regs_t ddbregs;

    curcpu()->ci_ddb_regs = &ddbregs;
#endif

    DDB_REGS->db_tf = *tf;
    /*
     * %o6 is the stack pointer; copy the frame it points at.
     * NOTE(review): this assumes a directly-addressable 64-bit kernel
     * frame (no copyin, no 32-bit frame translation) — see the disabled
     * code below and the XXX comment for the unfinished general case.
     */
    DDB_REGS->db_fr = *(struct frame64 *)
                      (uintptr_t)tf->tf_out[6];

    if (fplwp) {
        /*
         * Flush the FPU owner's live registers to its save area,
         * snapshot that for ddb, then reload so the owner's state
         * in hardware is unchanged by the snapshot.
         */
        savefpstate(fplwp->l_md.md_fpstate);
        DDB_REGS->db_fpstate = *fplwp->l_md.md_fpstate;
        loadfpstate(fplwp->l_md.md_fpstate);
    }
    /* We should do a proper copyin and xlate 64-bit stack frames, but... */
    /*	if (tf->tf_tstate & TSTATE_PRIV) { .. } */

#if 0
    /* make sure this is not causing ddb problems. */
    if (tf->tf_out[6] & 1) {
        if ((unsigned)(tf->tf_out[6] + BIAS) > (unsigned)KERNBASE)
            DDB_REGS->db_fr = *(struct frame64 *)(tf->tf_out[6] + BIAS);
        else
            copyin((void *)(tf->tf_out[6] + BIAS), &DDB_REGS->db_fr, sizeof(struct frame64));
    } else {
        struct frame32 tfr;
        int i;

        /* First get a local copy of the frame32 */
        if ((unsigned)(tf->tf_out[6]) > (unsigned)KERNBASE)
            tfr = *(struct frame32 *)tf->tf_out[6];
        else
            copyin((void *)(tf->tf_out[6]), &tfr, sizeof(struct frame32));
        /* Now copy each field from the 32-bit value to the 64-bit value */
        for (i=0; i<8; i++)
            DDB_REGS->db_fr.fr_local[i] = tfr.fr_local[i];
        for (i=0; i<6; i++)
            DDB_REGS->db_fr.fr_arg[i] = tfr.fr_arg[i];
        DDB_REGS->db_fr.fr_fp = (long)tfr.fr_fp;
        DDB_REGS->db_fr.fr_pc = tfr.fr_pc;
    }
#endif
    /* Save the per-trap-level register stack; returns the trap level. */
    DDB_REGS->db_tl = savetstate(&DDB_REGS->db_ts[0]);
}
Example no. 3
0
/*
 * Machine-dependent half of setmcontext(2) for 32-bit (netbsd32)
 * compat processes: install register and FP context from a 32-bit
 * mcontext into the LWP's 64-bit trap frame.
 *
 * NOTE(review): the entire implementation is compiled out under
 * NOT_YET, so this currently always reports success without touching
 * any state.
 */
int
netbsd32_cpu_setmcontext(
	struct lwp *l,
	/* XXX const netbsd32_*/mcontext_t *mcp,
	unsigned int flags)
{
#ifdef NOT_YET
/* XXX */
	greg32_t *gr = mcp->__gregs;
	struct trapframe64 *tf = l->l_md.md_tf;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	/*
	 * NOTE(review): `p` is not declared in this scope (the parameter
	 * is `l`), so this disabled code would not compile as-is; and
	 * mutex_enter() has no matching mutex_exit() — presumably because
	 * sigexit() does not return, but verify before enabling.
	 */
	if (rwindow_save(p)) {
		mutex_enter(l->l_proc->p_lock);
		sigexit(p, SIGILL);
	}

	if ((flags & _UC_CPU) != 0) {
		/*
	 	 * Only the icc bits in the psr are used, so it need not be
	 	 * verified.  pc and npc must be multiples of 4.  This is all
	 	 * that is required; if it holds, just do it.
		 */
		if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
		    gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
			return (EINVAL);

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG_PSR]);
		tf->tf_pc        = (uint64_t)gr[_REG_PC];
		tf->tf_npc       = (uint64_t)gr[_REG_nPC];
		tf->tf_y         = (uint64_t)gr[_REG_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG_G6];
		tf->tf_global[7] = (uint64_t)gr[_REG_G7];
		tf->tf_out[0]    = (uint64_t)gr[_REG_O0];
		tf->tf_out[1]    = (uint64_t)gr[_REG_O1];
		tf->tf_out[2]    = (uint64_t)gr[_REG_O2];
		tf->tf_out[3]    = (uint64_t)gr[_REG_O3];
		tf->tf_out[4]    = (uint64_t)gr[_REG_O4];
		tf->tf_out[5]    = (uint64_t)gr[_REG_O5];
		tf->tf_out[6]    = (uint64_t)gr[_REG_O6];
		tf->tf_out[7]    = (uint64_t)gr[_REG_O7];
		/* %asi restored above; %fprs not yet supported. */

		/* XXX mcp->__gwins */
	}

	/* Restore FP register context, if any. */
	if ((flags & _UC_FPU) != 0 && mcp->__fpregs.__fpu_en != 0) {
		struct fpstate *fsp;
		const netbsd32_fpregset_t *fpr = &mcp->__fpregs;
		int reload = 0;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		/*
		 * XXX Should we really activate the supplied FPU context
		 * XXX immediately or just fault it in later?
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
			reload = 1;
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, fpr->__fpu_fr, sizeof (fpr->__fpu_fr));
		fsp->fs_fsr = fpr->__fpu_fsr;	/* don't care about fcc1-3 */
		fsp->fs_qsize = 0;

#if 0
		/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
#endif

		/* Reload context again, if necessary. */
		if (reload)
			loadfpstate(fsp);
	}

	/* XXX mcp->__xrs */
	/* XXX mcp->__asrs */
#endif
	return (0);
}