Example #1
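/* Fast path for MVC: translate the destination and source virtual addresses,
 * map the backing physical memory and move the bytes with a single memmove(). */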
static void mvc_fast_memmove(CPUS390XState *env, uint32_t l, uint64_t dest,
                             uint64_t src)
{
    hwaddr dest_phys;
    hwaddr src_phys;
    hwaddr len = l;
    void *dest_p;
    void *src_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, 0);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
        cpu_ldub_data(env, src);
        cpu_abort(env, "should never reach here");
    }
    src_phys |= src & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
    src_p = cpu_physical_memory_map(src_phys, &len, 0);

    memmove(dest_p, src_p, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
    cpu_physical_memory_unmap(src_p, 0, len, len);
}
Example #2
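/* Return non-zero if this thread can fetch a new macro-instruction in the
 * current cycle. */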
static int X86ThreadCanFetch(X86Thread *self) {
	X86Cpu *cpu = self->cpu;
	X86Context *ctx = self->ctx;

	unsigned int phy_addr;
	unsigned int block;

	/* Context must be running */
	if (!ctx || !X86ContextGetState(ctx, X86ContextRunning))
		return 0;

	/* Fetch stalled or context evict signal activated */
	if (self->fetch_stall_until >= asTiming(cpu)->cycle || ctx->evict_signal)
		return 0;

	/* The fetch queue must not have exceeded its limit of stored bytes
	 * in order to accept new macro-instructions. */
	if (self->fetchq_occ >= x86_fetch_queue_size)
		return 0;

	/* If the next fetch address belongs to a new block, cache system
	 * must be accessible to read it. */
	block = self->fetch_neip & ~(self->inst_mod->block_size - 1);

	if (block != self->fetch_block) {
		phy_addr = mmu_translate(self->ctx->address_space_index,
				self->fetch_neip);
		if (!mod_can_access(self->inst_mod, phy_addr))
			return 0;
	}

	/* We can fetch */
	return 1;
}
Example #3
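/* Debug accessor: translate 'vaddr' to a physical address using the current
 * ASC addressing mode; 31-bit addresses are masked first. */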
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false);

    return raddr;
}
Example #4
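/* Debug accessor: return the physical address backing 'addr', or 0 if the
 * MMU is enabled and the lookup misses. */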
target_phys_addr_t cpu_get_phys_page_debug(CPUMBState * env, target_ulong addr)
{
    target_ulong vaddr, paddr = 0;
    struct microblaze_mmu_lookup lu;
    unsigned int hit;

    if (env->sregs[SR_MSR] & MSR_VM) {
        hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else
            paddr = 0; /* ???.  */
    } else
        paddr = addr & TARGET_PAGE_MASK;

    return paddr;
}
Example #5
File: debug.cpp  Project: Pear0/firebird
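/* Evaluate a debugger expression: hexadecimal constants combined with '+' and
 * '-', 'v<hex>' for a virtual address translated through the MMU, 'r<n>' for
 * a register by number, and the names of registers 13-15 (sp/lr/pc). */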
static uint32_t parse_expr(char *str) {
    uint32_t sum = 0;
    int sign = 1;
    if (str == NULL)
        return 0;
    while (*str) {
        int reg;
        if (isxdigit(*str)) {
            sum += sign * strtoul(str, &str, 16);
            sign = 1;
        } else if (*str == '+') {
            str++;
        } else if (*str == '-') {
            sign = -1;
            str++;
        } else if (*str == 'v') {
            sum += sign * mmu_translate(strtoul(str + 1, &str, 16), false, NULL, NULL);
            sign = 1;
        } else if (*str == 'r') {
            reg = strtoul(str + 1, &str, 10);
            if(reg > 15)
            {
                gui_debug_printf("Reg number out of range!\n");
                return 0;
            }
            sum += sign * arm.reg[reg];
            sign = 1;
        } else {
            for (reg = 13; reg < 16; reg++) {
                if (!memcmp(str, reg_name[reg], 2)) {
                    str += 2;
                    sum += sign * arm.reg[reg];
                    sign = 1;
                    goto ok;
                }
            }
            gui_debug_printf("syntax error\n");
            return 0;
ok:;
        }
    }
    return sum;
}
Example #6
File: helper.c  Project: AdrianHuang/qemu
hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    target_ulong vaddr, paddr = 0;
    struct microblaze_mmu_lookup lu;
    unsigned int hit;

    if (env->sregs[SR_MSR] & MSR_VM) {
        hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else
            paddr = 0; /* ???.  */
    } else
        paddr = addr & TARGET_PAGE_MASK;

    return paddr;
}
Example #7
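/* TLB fill handler: translate the faulting address, check that it maps into
 * guest RAM, and install a TLB entry; returns 1 if a program exception was
 * raised instead. */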
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
            __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
Example #8
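/* Fast path for an MVC used as a memset: translate the destination address,
 * map the backing physical memory and fill it with 'byte'. */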
static void mvc_fast_memset(CPUS390XState *env, uint32_t l, uint64_t dest,
                            uint8_t byte)
{
    hwaddr dest_phys;
    hwaddr len = l;
    void *dest_p;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    int flags;

    if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
        cpu_stb_data(env, dest, byte);
        cpu_abort(env, "should never reach here");
    }
    dest_phys |= dest & ~TARGET_PAGE_MASK;

    dest_p = cpu_physical_memory_map(dest_phys, &len, 1);

    memset(dest_p, byte, len);

    cpu_physical_memory_unmap(dest_p, 1, len, len);
}
Example #9
int cpu_mb_handle_mmu_fault (CPUMBState *env, target_ulong address, int rw,
                             int mmu_idx)
{
    unsigned int hit;
    unsigned int mmu_available;
    int r = 1;
    int prot;

    mmu_available = 0;
    if (env->pvr.regs[0] & PVR0_USE_MMU) {
        mmu_available = 1;
        if ((env->pvr.regs[0] & PVR0_PVR_FULL_MASK)
            && (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
            mmu_available = 0;
        }
    }

    /* Translate if the MMU is available and enabled.  */
    if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
        target_ulong vaddr, paddr;
        struct microblaze_mmu_lookup lu;

        hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
        if (hit) {
            vaddr = address & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;

            DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
                     mmu_idx, vaddr, paddr, lu.prot));
            tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
            r = 0;
        } else {
            env->sregs[SR_EAR] = address;
            DMMU(qemu_log("mmu=%d miss v=%x\n", mmu_idx, address));

            switch (lu.err) {
                case ERR_PROT:
                    env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
                    env->sregs[SR_ESR] |= (rw == 1) << 10;
                    break;
                case ERR_MISS:
                    env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
                    env->sregs[SR_ESR] |= (rw == 1) << 10;
                    break;
                default:
                    abort();
                    break;
            }

            if (env->exception_index == EXCP_MMU) {
                cpu_abort(env, "recursive faults\n");
            }

            /* TLB miss.  */
            env->exception_index = EXCP_MMU;
        }
    } else {
        /* MMU disabled or not available.  */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        r = 0;
    }
    return r;
}
Example #10
/* Run the emulation of one x86 macro-instruction and create its uops.
 * If any of the uops is a control uop, this uop will be the return value of
 * the function. Otherwise, the first decoded uop is returned. */
static struct x86_uop_t *X86ThreadFetchInst(X86Thread *self,
		int fetch_trace_cache) {
	X86Cpu *cpu = self->cpu;
	X86Core *core = self->core;
	X86Context *ctx = self->ctx;

	struct x86_uop_t *uop;
	struct x86_uop_t *ret_uop;

	struct x86_uinst_t *uinst;
	int uinst_count;
	int uinst_index;

	/* Functional simulation */
	self->fetch_eip = self->fetch_neip;
	X86ContextSetEip(ctx, self->fetch_eip);
	X86ContextExecute(ctx);
	self->fetch_neip = self->fetch_eip + ctx->inst.size;

	/* If no micro-instruction was generated by this instruction, create a
	 * 'nop' micro-instruction. This makes sure that there is always a micro-
	 * instruction representing the regular control flow of macro-instructions
	 * of the program. It is important for the traces stored in the trace
	 * cache. */
	if (!x86_uinst_list->count)
		x86_uinst_new(ctx, x86_uinst_nop, 0, 0, 0, 0, 0, 0, 0);

	/* Micro-instructions created by the x86 instructions can be found now
	 * in 'x86_uinst_list'. */
	uinst_count = list_count(x86_uinst_list);
	uinst_index = 0;
	ret_uop = NULL;
	while (list_count(x86_uinst_list)) {
		/* Get uinst from head of list */
		uinst = list_remove_at(x86_uinst_list, 0);

		/* Create uop */
		uop = x86_uop_create();
		uop->uinst = uinst;
		assert(uinst->opcode >= 0 && uinst->opcode < x86_uinst_opcode_count);
		uop->flags = x86_uinst_info[uinst->opcode].flags;
		uop->id = cpu->uop_id_counter++;
		uop->id_in_core = core->uop_id_counter++;

		uop->ctx = ctx;
		uop->thread = self;

		uop->mop_count = uinst_count;
		uop->mop_size = ctx->inst.size;
		uop->mop_id = uop->id - uinst_index;
		uop->mop_index = uinst_index;

		uop->eip = self->fetch_eip;
		uop->in_fetch_queue = 1;
		uop->trace_cache = fetch_trace_cache;
		uop->specmode = X86ContextGetState(ctx, X86ContextSpecMode);
		uop->fetch_address = self->fetch_address;
		uop->fetch_access = self->fetch_access;
		uop->neip = ctx->regs->eip;
		uop->pred_neip = self->fetch_neip;
		uop->target_neip = ctx->target_eip;

		/* Process uop dependences and classify them in integer, floating-point,
		 * flags, etc. */
		x86_uop_count_deps(uop);

		/* Calculate physical address of a memory access */
		if (uop->flags & X86_UINST_MEM) {
			if (uinst->address == ctx->mem_mod_low && ctx->mem_mod_low != 0) {
				/* fprintf(stderr, "%x, low\n", uinst->address); */
				ctx->mem_low = uop->data - (uop->data & (self->data_mod->block_size - 1));
				uop->addr = uinst->address;
			} else if (uinst->address == ctx->mem_mod_high && ctx->mem_mod_high != 0) {
				/* fprintf(stderr, "%x, high\n", uinst->address); */
				ctx->mem_high = uop->data | (self->data_mod->block_size - 1);
				uop->addr = uinst->address;
			} else if (!FPGARegCheck(ctx, uop, uinst->address)) {

				if (self->standalone) {
					uop->phy_addr = uinst->address;
					uop->addr = uinst->address;
					mem_read_copy(ctx->mem, uop->addr, 4, &(uop->data));
				} else {
					uop->phy_addr = mmu_translate(
							self->ctx->address_space_index, uinst->address);
					uop->addr = uinst->address;
					mem_read_copy(ctx->mem, uop->addr, 4, &(uop->data));
				}
			}
		}

		/* Trace */
		if (x86_tracing()) {
			char str[MAX_STRING_SIZE];
			char inst_name[MAX_STRING_SIZE];
			char uinst_name[MAX_STRING_SIZE];

			char *str_ptr;

			int str_size;

			str_ptr = str;
			str_size = sizeof str;

			/* Command */
			str_printf(&str_ptr, &str_size, "x86.new_inst id=%lld core=%d",
					uop->id_in_core, core->id);

			/* Speculative mode */
			if (uop->specmode)
				str_printf(&str_ptr, &str_size, " spec=\"t\"");

			/* Macro-instruction name */
			if (!uinst_index) {
				x86_inst_dump_buf(&ctx->inst, inst_name, sizeof inst_name);
				str_printf(&str_ptr, &str_size, " asm=\"%s\"", inst_name);
			}

			/* Rest */
			x86_uinst_dump_buf(uinst, uinst_name, sizeof uinst_name);
			str_printf(&str_ptr, &str_size, " uasm=\"%s\" stg=\"fe\"",
					uinst_name);

			/* Dump */
			x86_trace("%s\n", str);
		}

		/* Select as returned uop */
		if (!ret_uop || (uop->flags & X86_UINST_CTRL))
			ret_uop = uop;

		/* Insert into fetch queue */

		list_add(self->fetch_queue, uop);
		if (fetch_trace_cache)
			self->trace_cache_queue_occ++;

		/* Statistics */
		cpu->num_fetched_uinst++;
		self->num_fetched_uinst++;
		if (fetch_trace_cache)
			self->trace_cache->num_fetched_uinst++;

		/* Next uinst */
		uinst_index++;

	}

	/* Increase fetch queue occupancy if instruction does not come from
	 * trace cache, and return. */
	if (ret_uop && !fetch_trace_cache)
		self->fetchq_occ += ret_uop->mop_size;
	return ret_uop;
}
Example #11
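/* Fetch stage for one hardware thread: access the instruction cache for the
 * current fetch block and keep decoding macro-instructions into the fetch
 * queue until the block ends, the queue fills up, or a predicted-taken
 * branch redirects the fetch address. */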
static void X86ThreadFetch(X86Thread *self) {
	X86Context *ctx = self->ctx;
	struct x86_uop_t *uop;

	unsigned int phy_addr;
	unsigned int block;
	unsigned int target;

	int taken;

	/* Try to fetch from trace cache first */
	if (x86_trace_cache_present && X86ThreadFetchTraceCache(self))
		return;

	/* If new block to fetch is not the same as the previously fetched (and stored)
	 * block, access the instruction cache. */
	block = self->fetch_neip & ~(self->inst_mod->block_size - 1);
	if (block != self->fetch_block) {

		phy_addr = mmu_translate(self->ctx->address_space_index,
				self->fetch_neip);

		self->fetch_block = block;
		self->fetch_address = phy_addr;
		self->fetch_access = mod_access(self->inst_mod, mod_access_load,
				phy_addr, NULL, NULL, NULL,
				NULL, self->inst_latency, 4);
		self->btb_reads++;

		/* MMU statistics */
		if (*mmu_report_file_name)
			mmu_access_page(phy_addr, mmu_access_execute);
	}

	/* Fetch all instructions within the block up to the first predict-taken branch. */
	while ((self->fetch_neip & ~(self->inst_mod->block_size - 1)) == block) {
		/* If instruction caused context to suspend or finish */
		if (!X86ContextGetState(ctx, X86ContextRunning))
			break;

		/* If fetch queue full, stop fetching */
		if (self->fetchq_occ >= x86_fetch_queue_size)
			break;

		/* Insert macro-instruction into the fetch queue. Since the macro-instruction
		 * information is only available at this point, we use it to decode
		 * instruction now and insert uops into the fetch queue. However, the
		 * fetch queue occupancy is increased with the macro-instruction size. */
		uop = X86ThreadFetchInst(self, 0);
		if (!ctx->inst.size) /* x86_isa_inst invalid - no forward progress in loop */
			break;
		if (!uop) /* no uop was produced by this macro-instruction */
			continue;

		/* Instructions detected as branches by the BTB are checked for branch
		 * direction in the branch predictor. If they are predicted taken,
		 * stop fetching from this block and set the new fetch address. */
		if (uop->flags & X86_UINST_CTRL) {
			target = X86ThreadLookupBTB(self, uop);
			taken = target && X86ThreadLookupBranchPred(self, uop);
			if (taken) {
				self->fetch_neip = target;
				uop->pred_neip = target;
				break;
			}
		}
	}
}
Example #12
File: debug.cpp  Project: Pear0/firebird
void *virt_mem_ptr(uint32_t addr, uint32_t size) {
    // Note: this is not guaranteed to be correct when the range crosses a page boundary
    return (void *)(intptr_t)phys_mem_ptr(mmu_translate(addr, false, NULL, NULL), size);
}
Example #13
int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
                            int mmu_idx)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    unsigned int hit;
    unsigned int mmu_available;
    int r = 1;
    int prot;

    mmu_available = 0;
    if (cpu->cfg.use_mmu) {
        mmu_available = 1;
        if ((cpu->cfg.pvr == C_PVR_FULL) &&
            (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
            mmu_available = 0;
        }
    }

    /* Translate if the MMU is available and enabled.  */
    if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
        target_ulong vaddr, paddr;
        struct microblaze_mmu_lookup lu;

        hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
        if (hit) {
            vaddr = address & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;

            qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                    mmu_idx, vaddr, paddr, lu.prot);
            tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
            r = 0;
        } else {
            env->sregs[SR_EAR] = address;
            qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                                        mmu_idx, address);

            switch (lu.err) {
                case ERR_PROT:
                    env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
                    env->sregs[SR_ESR] |= (rw == 1) << 10;
                    break;
                case ERR_MISS:
                    env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
                    env->sregs[SR_ESR] |= (rw == 1) << 10;
                    break;
                default:
                    abort();
                    break;
            }

            if (cs->exception_index == EXCP_MMU) {
                cpu_abort(cs, "recursive faults\n");
            }

            /* TLB miss.  */
            cs->exception_index = EXCP_MMU;
        }
    } else {
        /* MMU disabled or not available.  */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
        r = 0;
    }
    return r;
}