Example #1
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uintptr_t ra = GETPC();

    if ((a0 & 0xf) != 0) {
        raise_exception_ra(env, EXCP0D_GPF, ra);
    } else {
#ifndef CONFIG_ATOMIC128
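        /* No 128-bit atomic ops in this configuration: exit to the slow path
           so the instruction is retried with all other vCPUs stopped.  */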
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int eflags = cpu_cc_compute_all(env, CC_OP);

        Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
        Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);

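        /* Pack the required 16-byte alignment and the current MMU index
           into a memop index for the atomic cmpxchg helper.  */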
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
        Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
                                                    newv, oi, ra);

        if (int128_eq(oldv, cmpv)) {
            eflags |= CC_Z;
        } else {
            env->regs[R_EAX] = int128_getlo(oldv);
            env->regs[R_EDX] = int128_gethi(oldv);
            eflags &= ~CC_Z;
        }
        CC_SRC = eflags;
#endif
    }
}
Example #2
static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                         uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
        void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (src_p && dest_p) {
            /* Access to both whole pages granted.  */
            int l_adj = adj_len_to_page(l, src);
            l_adj = adj_len_to_page(l_adj, dest);
            memmove(dest_p, src_p, l_adj);
            src += l_adj;
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to one or both whole pages. The next
               read or write access will likely fill the QEMU TLB for the
               next iteration.  */
            cpu_stb_data(env, dest, cpu_ldub_data(env, src));
            src++;
            dest++;
            l--;
        }
    }
}
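adj_len_to_page() is used above but not shown in this excerpt; in the s390x
helpers it clamps the length so that an access never crosses a target page
boundary. A minimal sketch, assuming QEMU's MIN() and TARGET_PAGE_MASK:

static inline uint32_t adj_len_to_page(uint32_t len, uint64_t addr)
{
    /* Limit len so that [addr, addr + len) stays within one target page. */
    return MIN(len, -(addr | TARGET_PAGE_MASK));
}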
Example #3
static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
                          unsigned int size)
{
    int mem_index = cpu_mmu_index(dc->env);

    /* If we get a fault on a delayslot we must keep the jmp state in
       the cpu-state to be able to re-execute the jmp.  */
    if (dc->delayed_branch == 1) {
        cris_store_direct_jmp(dc);
    }

    /* Conditional writes. We only support the kind where X is known
       at translation time.  */
    if (dc->flagx_known && dc->flags_x) {
        gen_store_v10_conditional(dc, addr, val, size, mem_index);
        return;
    }

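    /* Unconditional store: emit the TCG store op matching the access size.  */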
    if (size == 1) {
        tcg_gen_qemu_st8(val, addr, mem_index);
    } else if (size == 2) {
        tcg_gen_qemu_st16(val, addr, mem_index);
    } else {
        tcg_gen_qemu_st32(val, addr, mem_index);
    }
}
Example #4
/* NOTE: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
    uintptr_t mmu_idx = cpu_mmu_index(env, true);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
    void *p;

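    /* Look up the code-fetch TLB entry; on a miss, try the victim TLB before
       asking tlb_fill() to perform a full translation (which may fault).  */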
    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
        if (!VICTIM_TLB_HIT(addr_code, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
            index = tlb_index(env, mmu_idx, addr);
            entry = tlb_entry(env, mmu_idx, addr);
        }
        assert(tlb_hit(entry->addr_code, addr));
    }

    if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) {
        /*
         * Return -1 if we can't translate and execute from an entire
         * page of RAM here, which will cause us to execute by loading
         * and translating one insn at a time, without caching:
         *  - TLB_RECHECK: means the MMU protection covers a smaller range
         *    than a target page, so we must redo the MMU check every insn
         *  - TLB_MMIO: region is not backed by RAM
         */
        return -1;
    }

    p = (void *)((uintptr_t)addr + entry->addend);
    return qemu_ram_addr_from_host_nofail(p);
}
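For context, tlb_hit() above compares the tag recorded in the CPUTLBEntry
against the page-aligned guest address; TLB_INVALID_MASK is folded into the
comparison mask, so an invalidated entry can never match. A rough sketch of
those helpers (they are not part of this excerpt):

static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong page)
{
    /* Match iff the tags agree and the entry is not marked invalid. */
    return tlb_addr == (page & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}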
Example #5
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
#ifdef CONFIG_ATOMIC64
    uint64_t oldv, cmpv, newv;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);

    cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
    newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);

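    /* User-mode emulation can cmpxchg directly on the host address; system
       mode must go through the MMU-aware atomic helper.  */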
#ifdef CONFIG_USER_ONLY
    {
        uint64_t *haddr = g2h(a0);
        cmpv = cpu_to_le64(cmpv);
        newv = cpu_to_le64(newv);
        oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
        oldv = le64_to_cpu(oldv);
    }
#else
    {
        uintptr_t ra = GETPC();
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
        oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
    }
#endif

    if (oldv == cmpv) {
        eflags |= CC_Z;
    } else {
        env->regs[R_EAX] = (uint32_t)oldv;
        env->regs[R_EDX] = (uint32_t)(oldv >> 32);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
#else
    cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
#endif /* CONFIG_ATOMIC64 */
}
Example #6
static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                        uint32_t l)
{
    int mmu_idx = cpu_mmu_index(env, false);

    while (l > 0) {
        void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
        if (p) {
            /* Access to the whole page in write mode granted.  */
            int l_adj = adj_len_to_page(l, dest);
            memset(p, byte, l_adj);
            dest += l_adj;
            l -= l_adj;
        } else {
            /* We failed to get access to the whole page. The next write
               access will likely fill the QEMU TLB for the next iteration.  */
            cpu_stb_data(env, dest, byte);
            dest++;
            l--;
        }
    }
}
Example #7
/* generate intermediate code for basic block 'tb'.  */
static void gen_intermediate_code_internal(
    Nios2CPU *cpu, TranslationBlock *tb, int search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUNios2State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    int num_insns;
    int max_insns;
    uint32_t next_page_start;
    int j, lj = -1;
    uint16_t *gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    /* Initialize DC */
    dc->cpu_env = cpu_env;
    dc->cpu_R   = cpu_R;
    dc->is_jmp  = DISAS_NEXT;
    dc->pc      = tb->pc;
    dc->tb      = tb;
    dc->mem_idx = cpu_mmu_index(env);

    /* Dump the CPU state to the log */
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("--------------\n");
        log_cpu_state(cs, 0);
    }

    /* Set up instruction counts */
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    next_page_start = (tb->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    gen_tb_start();
    do {
        /* Mark instruction start with associated PC */
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        /* Decode an instruction */
        handle_instruction(dc, env);

        dc->pc += 4;
        num_insns++;

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Indicate where the next block should start */
    switch (dc->is_jmp) {
    case DISAS_NEXT:
        /* Save the current PC back into the CPU register */
        tcg_gen_movi_tl(cpu_R[R_PC], dc->pc);
        tcg_gen_exit_tb(0);
        break;

    default:
    case DISAS_JUMP:
    case DISAS_UPDATE:
        /* The jump will already have updated the PC register */
        tcg_gen_exit_tb(0);
        break;

    case DISAS_TB_JUMP:
        /* nothing more to generate */
        break;
    }

    /* End off the block */
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;

    /* Zero-fill gen_opc_instr_start for the remaining op slots */
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - tb->pc;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(tb->pc));
        log_target_disas(env, tb->pc, dc->pc - tb->pc, 0);
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - tb->pc, tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf);
    }
#endif
}