Example #1
/* Helper routine for PUSH_DR_MCONTEXT in x86.asm: fills in the xmm0-xmm5
 * values (all of the xmm slots on Linux), or the full ymm values when AVX
 * is enabled, but only if the caller-saved state needs to be preserved.
 */
void
get_xmm_vals(priv_mcontext_t *mc)
{
    if (preserve_xmm_caller_saved()) {
        ASSERT(proc_has_feature(FEATURE_SSE));
        if (YMM_ENABLED())
            get_ymm_caller_saved(&mc->ymm[0]);
        else
            get_xmm_caller_saved(&mc->ymm[0]);
    }
}
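Both get_xmm_caller_saved() and get_ymm_caller_saved() can write to the same
&mc->ymm[0] destination because each 256-bit slot stores the xmm value in its
low 128 bits, with the upper half starting at u32[4]. A minimal, standalone
sketch of that layout assumption (ymm_slot_t here is a simplified stand-in for
DynamoRIO's dr_ymm_t union, not its real definition):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Simplified stand-in for dr_ymm_t: a 256-bit slot whose low 128 bits hold
 * the xmm value and whose u32[4..7] hold the ymm upper half.
 */
typedef union {
    uint32_t u32[8];
    uint8_t u8[32];
} ymm_slot_t;

int
main(void)
{
    ymm_slot_t slot;
    memset(&slot, 0, sizeof(slot));
    /* Writing 16 bytes at the base fills only the xmm (low) half... */
    memset(&slot, 0xab, 16);
    assert(slot.u8[15] == 0xab && slot.u8[16] == 0);
    /* ...so &slot.u32[4] is where the upper 128 bits begin, which is the
     * offset the sigcontext copies below rely on.
     */
    assert((void *)&slot.u32[4] == (void *)&slot.u8[16]);
    return 0;
}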
Example #2
void
mcontext_to_sigcontext_simd(sig_full_cxt_t *sc_full, priv_mcontext_t *mc)
{
    /* We assume that _STRUCT_X86_FLOAT_STATE* matches exactly the first
     * half of _STRUCT_X86_AVX_STATE*, as in sigcontext_to_mcontext_simd().
     */
    sigcontext_t *sc = sc_full->sc;
    int i;
    /* Copy the low 128 bits of each slot into the contiguous xmm fields. */
    for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) {
        memcpy(&sc->__fs.__fpu_xmm0 + i, &mc->ymm[i], XMM_REG_SIZE);
    }
    if (YMM_ENABLED()) {
        /* Copy the upper 128 bits of each register into the ymmh fields. */
        for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) {
            memcpy(&sc->__fs.__fpu_ymmh0 + i, &mc->ymm[i].u32[4], YMMH_REG_SIZE);
        }
    }
}
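The indexing &sc->__fs.__fpu_xmm0 + i relies on the xmm fields being adjacent
struct members of the same 16-byte type, so pointer arithmetic advances one
full register per step. A small sketch of that pattern (xmm_reg_t and
fpu_state_t are hypothetical stand-ins for the Mach register fields, not the
real definitions, and assume no padding between same-type members):

#include <assert.h>
#include <stdint.h>

/* Hypothetical 16-byte register struct mirroring the Mach xmm field shape. */
typedef struct { uint8_t bytes[16]; } xmm_reg_t;

typedef struct {
    xmm_reg_t xmm0, xmm1, xmm2, xmm3;
} fpu_state_t;

int
main(void)
{
    fpu_state_t fs;
    /* &fs.xmm0 + i advances in sizeof(xmm_reg_t) = 16-byte steps, landing
     * on xmm1, xmm2, ... exactly as &sc->__fs.__fpu_xmm0 + i does.
     */
    assert((void *)(&fs.xmm0 + 1) == (void *)&fs.xmm1);
    assert((void *)(&fs.xmm0 + 3) == (void *)&fs.xmm3);
    return 0;
}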
Example #3
/* Helper routine for PUSH_DR_MCONTEXT in x86.asm: fills in the xmm0-xmm5
 * values (all of the xmm slots on Linux), or the full ymm values when AVX
 * is enabled, but only if the caller-saved state needs to be preserved.
 */
void
get_xmm_vals(priv_mcontext_t *mc)
{
#ifdef X86
    if (preserve_xmm_caller_saved()) {
        ASSERT(proc_has_feature(FEATURE_SSE));
        if (YMM_ENABLED())
            get_ymm_caller_saved(&mc->ymm[0]);
        else
            get_xmm_caller_saved(&mc->ymm[0]);
    }
#elif defined(ARM)
    /* FIXME i#1551: no xmm but SIMD regs on ARM */
    ASSERT_NOT_REACHED();
#endif
}
Example #4
void
sigcontext_to_mcontext_simd(priv_mcontext_t *mc, sig_full_cxt_t *sc_full)
{
    /* We assume that _STRUCT_X86_FLOAT_STATE* matches exactly the first
     * half of _STRUCT_X86_AVX_STATE*.
     */
    sigcontext_t *sc = sc_full->sc;
    int i;
    /* Copy the low 128 bits of each slot out of the contiguous xmm fields. */
    for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) {
        memcpy(&mc->ymm[i], &sc->__fs.__fpu_xmm0 + i, XMM_REG_SIZE);
    }
    if (YMM_ENABLED()) {
        /* Copy the upper 128 bits of each register out of the ymmh fields. */
        for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) {
            memcpy(&mc->ymm[i].u32[4], &sc->__fs.__fpu_ymmh0 + i, YMMH_REG_SIZE);
        }
    }
}
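This routine is the inverse of mcontext_to_sigcontext_simd() above, so the
low/high split must reassemble losslessly. A self-contained sketch of the same
split-copy round trip (NUM_SLOTS, ymm_slot_t, and mini_sc_t are hypothetical
stand-ins, not the real DynamoRIO or Mach definitions):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define NUM_SLOTS 4 /* stand-in for MCXT_NUM_SIMD_SLOTS */

typedef union {
    uint32_t u32[8];
    uint8_t u8[32];
} ymm_slot_t; /* simplified dr_ymm_t */

typedef struct { uint8_t bytes[16]; } reg128_t;

/* Miniature of the sigcontext FP area: separate xmm (low half) and ymmh
 * (high half) arrays, echoing the _STRUCT_X86_AVX_STATE split.
 */
typedef struct {
    reg128_t xmm[NUM_SLOTS];
    reg128_t ymmh[NUM_SLOTS];
} mini_sc_t;

int
main(void)
{
    ymm_slot_t in[NUM_SLOTS], out[NUM_SLOTS];
    mini_sc_t sc;
    int i, b;
    /* Give every byte a distinct value so swapped halves would be caught. */
    for (i = 0; i < NUM_SLOTS; i++) {
        for (b = 0; b < 32; b++)
            in[i].u8[b] = (uint8_t)(i * 32 + b);
    }
    /* mcontext -> sigcontext: split each 256-bit slot into xmm + ymmh. */
    for (i = 0; i < NUM_SLOTS; i++) {
        memcpy(&sc.xmm[i], &in[i], 16);
        memcpy(&sc.ymmh[i], &in[i].u32[4], 16);
    }
    /* sigcontext -> mcontext: reassemble and check the round trip. */
    for (i = 0; i < NUM_SLOTS; i++) {
        memcpy(&out[i], &sc.xmm[i], 16);
        memcpy(&out[i].u32[4], &sc.ymmh[i], 16);
        assert(memcmp(&in[i], &out[i], sizeof(in[i])) == 0);
    }
    return 0;
}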
Example #5
static void
dump_fpstate(dcontext_t *dcontext, sigcontext_t *sc)
{
    int i, j;
    LOG(THREAD, LOG_ASYNCH, 1, "\tfcw=0x%04x\n", *(ushort *)&sc->__fs.__fpu_fcw);
    LOG(THREAD, LOG_ASYNCH, 1, "\tfsw=0x%04x\n", *(ushort *)&sc->__fs.__fpu_fsw);
    LOG(THREAD, LOG_ASYNCH, 1, "\tftw=0x%02x\n", sc->__fs.__fpu_ftw);
    LOG(THREAD, LOG_ASYNCH, 1, "\tfop=0x%04x\n", sc->__fs.__fpu_fop);
    LOG(THREAD, LOG_ASYNCH, 1, "\tip=0x%08x\n", sc->__fs.__fpu_ip);
    LOG(THREAD, LOG_ASYNCH, 1, "\tcs=0x%04x\n", sc->__fs.__fpu_cs);
    LOG(THREAD, LOG_ASYNCH, 1, "\tdp=0x%08x\n", sc->__fs.__fpu_dp);
    LOG(THREAD, LOG_ASYNCH, 1, "\tds=0x%04x\n", sc->__fs.__fpu_ds);
    LOG(THREAD, LOG_ASYNCH, 1, "\tmxcsr=0x%08x\n", sc->__fs.__fpu_mxcsr);
    LOG(THREAD, LOG_ASYNCH, 1, "\tmxcsrmask=0x%08x\n", sc->__fs.__fpu_mxcsrmask);
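    /* The eight x87 stack registers are 80 bits each: dump five 16-bit words. */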
    for (i = 0; i < 8; i++) {
        LOG(THREAD, LOG_ASYNCH, 1, "\tst%d = ", i);
        for (j = 0; j < 5; j++) {
            LOG(THREAD, LOG_ASYNCH, 1, "%04x ",
                *((ushort *)(&sc->__fs.__fpu_stmm0 + i) + j));
        }
        LOG(THREAD, LOG_ASYNCH, 1, "\n");
    }
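    /* Each xmm register is 128 bits: dump four 32-bit words. */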
    for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) {
        LOG(THREAD, LOG_ASYNCH, 1, "\txmm%d = ", i);
        for (j = 0; j < 4; j++) {
            LOG(THREAD, LOG_ASYNCH, 1, "%08x ",
                *((uint *)(&sc->__fs.__fpu_xmm0 + i) + j));
        }
        LOG(THREAD, LOG_ASYNCH, 1, "\n");
    }
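    /* With AVX, also dump each 128-bit ymm upper half (ymmh). */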
    if (YMM_ENABLED()) {
        for (i = 0; i < MCXT_NUM_SIMD_SLOTS; i++) {
            LOG(THREAD, LOG_ASYNCH, 1, "\tymmh%d = ", i);
            for (j = 0; j < 4; j++) {
                LOG(THREAD, LOG_ASYNCH, 1, "%08x ",
                    *((uint *)(&sc->__fs.__fpu_ymmh0 + i) + j));
            }
            LOG(THREAD, LOG_ASYNCH, 1, "\n");
        }
    }
}