Example no. 1
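/* write every cached core register back to hardware, skipping entries
 * that have no probe-mode index (NOT_AVAIL_REG) */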
static int write_all_core_hw_regs(struct target *t)
{
	int err;
	unsigned i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		if (NOT_AVAIL_REG == regs[i].pm_idx)
			continue;
		/* the regval argument (0) is unused here; the trailing '1' asks
		 * write_hw_reg to take the value from the register cache instead */
		err = write_hw_reg(t, i, 0, 1);
		if (err != ERROR_OK) {
			LOG_ERROR("%s error restoring reg %s",
					__func__, x86_32->cache->reg_list[i].name);
			return err;
		}
	}
	LOG_DEBUG("write_all_core_hw_regs wrote %u registers ok", i);
	return ERROR_OK;
}
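The loop above depends on a static regs[] descriptor table: each entry carries the register name used in the log messages and a probe-mode index pm_idx, and entries whose pm_idx is NOT_AVAIL_REG are skipped. Below is a minimal sketch of such a table, assuming only the two fields the loop touches; the real table in OpenOCD's lakemont.c carries more metadata, and the sentinel value and sample indices here are purely illustrative.

#include <stdint.h>

#define NOT_AVAIL_REG UINT32_MAX	/* assumed sentinel; the project defines its own value */

struct core_reg_sketch {
	const char *name;	/* register name, used in log messages */
	uint32_t pm_idx;	/* probe-mode index, or NOT_AVAIL_REG to skip */
};

static const struct core_reg_sketch regs_sketch[] = {
	{ "eax",   0 },			/* illustrative index */
	{ "ecx",   1 },			/* illustrative index */
	{ "dummy", NOT_AVAIL_REG },	/* no probe-mode slot: the loop skips it */
};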
Example no. 2
/* do what's needed to properly enter probe mode for debug on Lakemont */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
	if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);

	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);

	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);

	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}

	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}

	/* if caches are enabled, disable and flush them, depending on the LMT core version */
	/* TODO: we never disable and flush for LMT3.5 and always do for LMT1, but to
	 * be clean, on LMT3.5 and later we should check bit 20 of TAPSTATUS
	 * (1 == don't flush the cache) and decide what action to take (see LDO 3.7.1.5).
	 * We can update the code once we know whether we ever need to support LMT2/3
	 * or a new LMT architecture appears.
	 */
	if (x86_32->core_type != LMT3_5 && !(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
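The TODO above leaves the LMT3.5 TAPSTATUS check unimplemented. Below is a minimal sketch of what that decision might look like, assuming a get_tapstatus()-style helper that returns the 32-bit TAP status word; the mask name is made up, and the bit position is taken only from the TODO's reference to LDO 3.7.1.5. This is an illustration of the described policy, not a tested implementation.

#include <stdbool.h>

#define TS_NO_CACHE_FLUSH	(1u << 20)	/* assumed: bit 20 set == don't flush the cache */

static bool cache_flush_needed(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	/* pre-LMT3.5 cores (e.g. LMT1) always disable and flush */
	if (x86_32->core_type != LMT3_5)
		return true;
	/* on LMT3.5 and later, honor the TAPSTATUS "don't flush" bit */
	return (get_tapstatus(t) & TS_NO_CACHE_FLUSH) == 0;
}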
Example no. 3
/* do what's needed to properly enter probe mode for debug on Lakemont */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
	if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);
	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);

	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);

	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}

	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}

	/* if caches are enabled, disable and flush them, depending on the core version */
	if (x86_32->core_type != LMT3_5 && !(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
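As a standalone illustration of the EFLAGS masking both versions perform: on x86, VM86 is bit 17 and IF is bit 9 of EFLAGS, so clearing both is a single AND with the inverted mask. The sample value below is made up for the demonstration.

#include <stdint.h>
#include <stdio.h>

#define EFLAGS_IF	(1u << 9)	/* interrupt enable flag */
#define EFLAGS_VM86	(1u << 17)	/* virtual-8086 mode flag */

int main(void)
{
	uint32_t eflags = 0x00020202;	/* example value with VM86 and IF set */
	uint32_t masked = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);

	/* prints: 0x00020202 -> 0x00000002 */
	printf("0x%08x -> 0x%08x\n", eflags, masked);
	return 0;
}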