Example #1
/* write lakemont core shadow ram reg, update reg cache if needed */
static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;

	uint8_t reg_buf[4];
	if (cache)
		regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
	buf_set_u32(reg_buf, 0, 32, regval);
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32 " cache=0x%08" PRIx8,
			x86_32->cache->reg_list[reg].name, arch_info->op, regval, cache);

	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
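	/* shift the new value into the probe data register (PDR),
	 * then commit it to shadow RAM with PDR2SRAM below */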
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
		return ERROR_FAIL;

	/* we are writing from the cache so ensure we reset flags */
	if (cache) {
		x86_32->cache->reg_list[reg].dirty = 0;
		x86_32->cache->reg_list[reg].valid = 0;
	}
	return ERROR_OK;
}
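For context, a minimal sketch of how a caller inside the same file could use write_hw_reg() to write back a dirty cached register. The flush_dirty_reg() helper is hypothetical and not part of the original file; write_hw_reg(), target_to_x86_32(), ERROR_OK/ERROR_FAIL and the cache layout come from the code above.

/* hypothetical helper: write one dirty register from the OpenOCD register
 * cache back to the Lakemont shadow RAM. With cache=1, write_hw_reg()
 * takes the value from the cache (the third argument is ignored) and
 * resets the dirty/valid flags on success.
 */
static int flush_dirty_reg(struct target *t, int reg)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	if (!x86_32->cache->reg_list[reg].dirty)
		return ERROR_OK; /* nothing to write back */
	return write_hw_reg(t, reg, 0, 1);
}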
Example #2
/* read reg from lakemont core shadow ram, update reg cache if needed */
static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;
	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
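	/* SRAM2SPDR above moved the word into the probe data register (PDR);
	 * now shift the PDR contents out to read the value */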
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;

	jtag_add_sleep(DELAY_SUBMITPIR);
	*regval = buf_get_u32(scan.out, 0, 32);
	if (cache) {
		buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
		x86_32->cache->reg_list[reg].valid = 1;
		x86_32->cache->reg_list[reg].dirty = 0;
	}
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			*regval);
	return ERROR_OK;
}
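Similarly, a minimal sketch of a hypothetical caller that reads a register with cache=1 so the register cache is refreshed as a side effect; sample_read_eflags() is illustrative only, while read_hw_reg(), EFLAGS and LOG_DEBUG() are the identifiers already used in this file.

/* hypothetical caller: read EFLAGS from shadow RAM and refresh the cache */
static int sample_read_eflags(struct target *t)
{
	uint32_t eflags;

	if (read_hw_reg(t, EFLAGS, &eflags, 1) != ERROR_OK)
		return ERROR_FAIL;
	/* the cached copy now matches the value just read from hardware */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 " (cache refreshed)", eflags);
	return ERROR_OK;
}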
Example #3
/* do what's needed to properly enter probe mode for debug on Lakemont */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
	if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);

	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);

	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);

	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}

	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}

	/* if caches are enabled, disable and flush them, depending on the LMT core version */
	/* TODO: we never disable and flush for LMT3.5 and always do for LMT1, but to
	 * be clean, on LMT3.5 and later we should check bit 20 of TAPSTATUS
	 * (1 == don't flush the cache) and decide what action to take (see LDO 3.7.1.5).
	 * We can update the code once we know if we ever need to support LMT2/3 or a
	 * new LMT architecture pops up.
	 */
	if (!(x86_32->core_type == LMT3_5) && !(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
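The EFLAGS edit above is a plain mask-and-clear; the standalone snippet below works through it with concrete numbers. It is illustrative only: the local eflags_if/eflags_vm86 constants encode the architectural x86 bit positions (IF is bit 9, VM is bit 17), which is what the driver's EFLAGS_IF/EFLAGS_VM86 macros are assumed to hold.

#include <assert.h>
#include <stdint.h>

/* worked example: clear VM86 (bit 17) and IF (bit 9) in one mask operation */
int main(void)
{
	const uint32_t eflags_if = 1u << 9;    /* interrupt enable flag */
	const uint32_t eflags_vm86 = 1u << 17; /* virtual-8086 mode flag */
	uint32_t eflags = 0x00020246;          /* sample value: VM86 and IF set */

	eflags &= ~(eflags_vm86 | eflags_if);
	assert(eflags == 0x00000046); /* both bits cleared, rest untouched */
	return 0;
}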