/* read reg from lakemont core shadow ram, update reg cache if needed */
static int read_hw_reg(struct target *t, int reg, uint32_t *regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;
	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAM2PDR) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, NULL, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;

	jtag_add_sleep(DELAY_SUBMITPIR);

	*regval = buf_get_u32(scan.out, 0, 32);
	if (cache) {
		buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, *regval);
		x86_32->cache->reg_list[reg].valid = 1;
		x86_32->cache->reg_list[reg].dirty = 0;
	}
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			*regval);
	return ERROR_OK;
}
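/*
 * Note on scan batching (descriptive comment, not from the original file):
 * read_hw_reg(), write_hw_reg() and submit_pir() clear x86_32->flush while
 * queueing the PIR and IR/DR scans, then set it again for the final scan,
 * so irscan()/drscan() call jtag_execute_queue() only once per register
 * access instead of once per scan.
 */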
static int irscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t ir_len)
{
	int retval = ERROR_OK;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (NULL == t->tap) {
		retval = ERROR_FAIL;
		LOG_ERROR("%s invalid target tap", __func__);
		return retval;
	}
	if (ir_len != t->tap->ir_length) {
		retval = ERROR_FAIL;
		if (t->tap->enabled)
			LOG_ERROR("%s tap enabled but tap irlen=%d",
					__func__, t->tap->ir_length);
		else
			LOG_ERROR("%s tap not enabled and irlen=%d",
					__func__, t->tap->ir_length);
		return retval;
	}
	struct scan_field *fields = &scan.field;
	fields->num_bits = ir_len;
	fields->out_value = out;
	fields->in_value = in;
	jtag_add_ir_scan(x86_32->curr_tap, fields, TAP_IDLE);
	if (x86_32->flush) {
		retval = jtag_execute_queue();
		if (retval != ERROR_OK)
			LOG_ERROR("%s failed to execute queue", __func__);
	}
	return retval;
}
struct reg_cache *lakemont_build_reg_cache(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	int num_regs = ARRAY_SIZE(regs);
	struct reg_cache **cache_p = register_get_last_cache_p(&t->reg_cache);
	struct reg_cache *cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = malloc(sizeof(struct reg) * num_regs);
	struct lakemont_core_reg *arch_info = malloc(sizeof(struct lakemont_core_reg) * num_regs);
	struct reg_feature *feature;
	int i;

	if (cache == NULL || reg_list == NULL || arch_info == NULL) {
		free(cache);
		free(reg_list);
		free(arch_info);
		LOG_ERROR("%s out of memory", __func__);
		return NULL;
	}

	/* Build the process context cache */
	cache->name = "lakemont registers";
	cache->next = NULL;
	cache->reg_list = reg_list;
	cache->num_regs = num_regs;
	(*cache_p) = cache;
	x86_32->cache = cache;

	for (i = 0; i < num_regs; i++) {
		arch_info[i].target = t;
		arch_info[i].x86_32_common = x86_32;
		arch_info[i].op = regs[i].op;
		arch_info[i].pm_idx = regs[i].pm_idx;
		reg_list[i].name = regs[i].name;
		reg_list[i].size = 32;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].dirty = 0;
		reg_list[i].valid = 0;
		reg_list[i].type = &lakemont_reg_type;
		reg_list[i].arch_info = &arch_info[i];
		reg_list[i].group = regs[i].group;
		reg_list[i].number = i;
		reg_list[i].exist = true;
		reg_list[i].caller_save = true;	/* gdb defaults to true */

		feature = calloc(1, sizeof(struct reg_feature));
		if (feature) {
			feature->name = regs[i].feature;
			reg_list[i].feature = feature;
		} else
			LOG_ERROR("%s unable to allocate feature list", __func__);

		reg_list[i].reg_data_type = calloc(1, sizeof(struct reg_data_type));
		if (reg_list[i].reg_data_type)
			reg_list[i].reg_data_type->type = regs[i].type;
		else
			LOG_ERROR("%s unable to allocate reg type list", __func__);
	}
	return cache;
}
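/*
 * Illustrative sketch only, not part of the driver: how a value in the
 * cache built above is typically updated from debugger code.  The helper
 * name is hypothetical; the field usage mirrors what lakemont_poll() and
 * restore_context() do with value/dirty/valid.
 */
static inline void example_cache_set_u32(struct x86_32_common *x86_32,
		unsigned reg, uint32_t val)
{
	buf_set_u32(x86_32->cache->reg_list[reg].value, 0, 32, val);
	x86_32->cache->reg_list[reg].dirty = 1;	/* restore_context() writes it back */
	x86_32->cache->reg_list[reg].valid = 1;
}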
/* write lakemont core shadow ram reg, update reg cache if needed */
static int write_hw_reg(struct target *t, int reg, uint32_t regval, uint8_t cache)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	struct lakemont_core_reg *arch_info;
	arch_info = x86_32->cache->reg_list[reg].arch_info;

	uint8_t reg_buf[4];
	if (cache)
		regval = buf_get_u32(x86_32->cache->reg_list[reg].value, 0, 32);
	buf_set_u32(reg_buf, 0, 32, regval);
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64 ", val=0x%08" PRIx32 " cache=0x%08" PRIx8,
			x86_32->cache->reg_list[reg].name,
			arch_info->op,
			regval,
			cache);

	x86_32->flush = 0; /* don't flush scans till we have a batch */
	if (submit_reg_pir(t, reg) != ERROR_OK)
		return ERROR_FAIL;
	if (submit_instruction_pir(t, SRAMACCESS) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = RDWRPDR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, reg_buf, scan.out, PDR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	x86_32->flush = 1;
	if (submit_instruction_pir(t, PDR2SRAM) != ERROR_OK)
		return ERROR_FAIL;

	/* we are writing from the cache so ensure we reset flags */
	if (cache) {
		x86_32->cache->reg_list[reg].dirty = 0;
		x86_32->cache->reg_list[reg].valid = 0;
	}
	return ERROR_OK;
}
static bool is_paging_enabled(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (x86_32->pm_regs[I(CR0)] & CR0_PG)
		return true;
	else
		return false;
}
static bool io_bpts_supported(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (x86_32->core_type == LMT3_5)
		return true;
	else
		return false;
}
static int enable_paging(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	x86_32->pm_regs[I(CR0)] = (x86_32->pm_regs[I(CR0)] | CR0_PG);
	int err = x86_32->write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error enabling paging", __func__);
		return err;
	}
	return err;
}
int lakemont_arch_state(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	LOG_USER("target halted due to %s at 0x%08" PRIx32 " in %s mode",
			debug_reason_name(t),
			buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32),
			(buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32) & CR0_PE) ? "protected" : "real");

	return ERROR_OK;
}
static int write_all_core_hw_regs(struct target *t)
{
	int err;
	unsigned i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		if (NOT_AVAIL_REG == regs[i].pm_idx)
			continue;
		err = write_hw_reg(t, i, 0, 1);
		if (err != ERROR_OK) {
			LOG_ERROR("%s error restoring reg %s",
					__func__, x86_32->cache->reg_list[i].name);
			return err;
		}
	}
	LOG_DEBUG("write_all_core_hw_regs wrote %u registers ok", i);
	return ERROR_OK;
}
static int restore_context(struct target *t)
{
	int err = ERROR_OK;
	uint32_t i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	/* write core regs into the core PM SRAM from the reg_cache */
	err = write_all_core_hw_regs(t);
	if (err != ERROR_OK) {
		LOG_ERROR("%s error writing regs", __func__);
		return err;
	}

	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		x86_32->cache->reg_list[i].dirty = 0;
		x86_32->cache->reg_list[i].valid = 0;
	}
	return err;
}
static int read_all_core_hw_regs(struct target *t)
{
	int err;
	uint32_t regval;
	unsigned i;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	for (i = 0; i < (x86_32->cache->num_regs); i++) {
		if (NOT_AVAIL_REG == regs[i].pm_idx)
			continue;
		err = read_hw_reg(t, regs[i].id, &regval, 1);
		if (err != ERROR_OK) {
			LOG_ERROR("%s error saving reg %s",
					__func__, x86_32->cache->reg_list[i].name);
			return err;
		}
	}
	LOG_DEBUG("read_all_core_hw_regs read %u registers ok", i);
	return ERROR_OK;
}
/*
 * PIR (Probe Mode Instruction Register), SUBMITPIR is an "IR only" TAP
 * command; there is no corresponding data register
 */
static int submit_pir(struct target *t, uint64_t op)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint32_t tapstatus = 0;

	uint8_t op_buf[8];
	buf_set_u64(op_buf, 0, 64, op);
	int flush = x86_32->flush;
	x86_32->flush = 0;
	scan.out[0] = WRPIR;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	if (drscan(t, op_buf, scan.out, PIR_SIZE) != ERROR_OK)
		return ERROR_FAIL;
	scan.out[0] = SUBMITPIR;
	x86_32->flush = flush;
	if (irscan(t, scan.out, NULL, LMT_IRLEN) != ERROR_OK)
		return ERROR_FAIL;
	jtag_add_sleep(DELAY_SUBMITPIR);

	/* HACK */
	if (x86_32->flush) {
		int cnt = 10;
		do {
			tapstatus = get_tapstatus(t);
			if (!(tapstatus & TS_PIR_BIT))
				return ERROR_OK;
			LOG_DEBUG("%s Waiting for TS_PIR_BIT, TS = 0x%08" PRIx32,
					__func__, tapstatus);
			usleep(100);
			cnt--;
		} while (cnt);
		LOG_ERROR("%s TS_PIR_BIT did not clear, TS = 0x%08" PRIx32,
				__func__, tapstatus);
		return ERROR_FAIL;
		/* TODO: find a nicer way to wait until PM_BIT changes */
	}
	return ERROR_OK;
}
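/*
 * Hedged sketch, not taken from the driver: submit_reg_pir() and
 * submit_instruction_pir(), called from read_hw_reg()/write_hw_reg(), are
 * assumed to be thin wrappers that feed a per-register or per-instruction
 * 64-bit probe-mode opcode into submit_pir().  The helper below is a
 * hypothetical illustration of that pattern using the regs[] table that
 * lakemont_build_reg_cache() draws from; the real wrapper bodies are not
 * shown in this excerpt.
 */
static int example_submit_reg_pir(struct target *t, int reg)
{
	LOG_DEBUG("reg=%s, op=0x%016" PRIx64, regs[reg].name, regs[reg].op);
	return submit_pir(t, regs[reg].op);
}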
int lakemont_resume(struct target *t, int current, uint32_t address,
		int handle_breakpoints, int debug_execution)
{
	struct breakpoint *bp = NULL;
	struct x86_32_common *x86_32 = target_to_x86_32(t);

	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	/* TODO lakemont_enable_breakpoints(t); */
	if (t->state == TARGET_HALTED) {
		/* resuming from an address with a software breakpoint needs some special handling */
		uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
		bp = breakpoint_find(t, eip);
		if (bp != NULL /*&& bp->type == BKPT_SOFT*/) {
			/* the step will step over the breakpoint */
			if (lakemont_step(t, 0, 0, 1) != ERROR_OK) {
				LOG_ERROR("%s stepping over a software breakpoint at 0x%08" PRIx32 " "
						"failed to resume the target", __func__, eip);
				return ERROR_FAIL;
			}
		}

		/* if breakpoints are enabled, we need to redirect these into probe mode */
		struct breakpoint *activeswbp = t->breakpoints;
		while (activeswbp != NULL && activeswbp->set == 0)
			activeswbp = activeswbp->next;
		struct watchpoint *activehwbp = t->watchpoints;
		while (activehwbp != NULL && activehwbp->set == 0)
			activehwbp = activehwbp->next;
		if (activeswbp != NULL || activehwbp != NULL)
			buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
		if (do_resume(t) != ERROR_OK)
			return ERROR_FAIL;
	} else {
		LOG_USER("target not halted");
		return ERROR_FAIL;
	}
	return ERROR_OK;
}
static int drscan(struct target *t, uint8_t *out, uint8_t *in, uint8_t len)
{
	int retval = ERROR_OK;
	uint64_t data = 0;
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (NULL == t->tap) {
		retval = ERROR_FAIL;
		LOG_ERROR("%s invalid target tap", __func__);
		return retval;
	}
	if (len > MAX_SCAN_SIZE || 0 == len) {
		retval = ERROR_FAIL;
		LOG_ERROR("%s data len is %d bits, max is %d bits",
				__func__, len, MAX_SCAN_SIZE);
		return retval;
	}
	struct scan_field *fields = &scan.field;
	fields->out_value = out;
	fields->in_value = in;
	fields->num_bits = len;
	jtag_add_dr_scan(x86_32->curr_tap, 1, fields, TAP_IDLE);
	if (x86_32->flush) {
		retval = jtag_execute_queue();
		if (retval != ERROR_OK) {
			LOG_ERROR("%s drscan failed to execute queue", __func__);
			return retval;
		}
	}
	if (in != NULL) {
		if (len >= 8) {
			for (int n = (len / 8) - 1 ; n >= 0; n--)
				data = (data << 8) + *(in+n);
		} else
			LOG_DEBUG("dr in 0x%02" PRIx8, *in);
	} else {
		LOG_ERROR("%s no drscan data", __func__);
		retval = ERROR_FAIL;
	}
	return retval;
}
/* do what's needed to properly enter probemode for debug on lakemont */
static int halt_prep(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	if (write_hw_reg(t, DSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSB].name, PM_DSB);
	if (write_hw_reg(t, DSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[DSL].name, PM_DSL);
	if (write_hw_reg(t, DSAR, PM_DSAR, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DSAR 0x%08" PRIx32, PM_DSAR);
	if (write_hw_reg(t, CSB, PM_DSB, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSB].name, PM_DSB);
	if (write_hw_reg(t, CSL, PM_DSL, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write %s 0x%08" PRIx32, regs[CSL].name, PM_DSL);
	if (write_hw_reg(t, DR7, PM_DR7, 0) != ERROR_OK)
		return ERROR_FAIL;
	LOG_DEBUG("write DR7 0x%08" PRIx32, PM_DR7);

	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t csar = buf_get_u32(x86_32->cache->reg_list[CSAR].value, 0, 32);
	uint32_t ssar = buf_get_u32(x86_32->cache->reg_list[SSAR].value, 0, 32);
	uint32_t cr0 = buf_get_u32(x86_32->cache->reg_list[CR0].value, 0, 32);

	/* clear VM86 and IF bits if they are set */
	LOG_DEBUG("EFLAGS = 0x%08" PRIx32 ", VM86 = %d, IF = %d", eflags,
			eflags & EFLAGS_VM86 ? 1 : 0,
			eflags & EFLAGS_IF ? 1 : 0);
	if ((eflags & EFLAGS_VM86) || (eflags & EFLAGS_IF)) {
		x86_32->pm_regs[I(EFLAGS)] = eflags & ~(EFLAGS_VM86 | EFLAGS_IF);
		if (write_hw_reg(t, EFLAGS, x86_32->pm_regs[I(EFLAGS)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("EFLAGS now = 0x%08" PRIx32 ", VM86 = %d, IF = %d",
				x86_32->pm_regs[I(EFLAGS)],
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_VM86 ? 1 : 0,
				x86_32->pm_regs[I(EFLAGS)] & EFLAGS_IF ? 1 : 0);
	}

	/* set CPL to 0 for memory access */
	if (csar & CSAR_DPL) {
		x86_32->pm_regs[I(CSAR)] = csar & ~CSAR_DPL;
		if (write_hw_reg(t, CSAR, x86_32->pm_regs[I(CSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write CSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(CSAR)]);
	}
	if (ssar & SSAR_DPL) {
		x86_32->pm_regs[I(SSAR)] = ssar & ~SSAR_DPL;
		if (write_hw_reg(t, SSAR, x86_32->pm_regs[I(SSAR)], 0) != ERROR_OK)
			return ERROR_FAIL;
		LOG_DEBUG("write SSAR_CPL to 0 0x%08" PRIx32, x86_32->pm_regs[I(SSAR)]);
	}

	/* if caches are enabled, disable and flush, depending on the LMT core version */
	/* TODO: we never disable and flush for LMT3.5 and always do for LMT1, but to
	 * be clean, on LMT3.5 and later we should check bit 20 of TAPSTATUS
	 * (1 == don't flush the cache) and decide what action to take (see LDO 3.7.1.5).
	 * We can update the code once we know if we ever need to support LMT2/3 or a
	 * new LMT architecture pops up.
	 */
	if (!(x86_32->core_type == LMT3_5) && !(cr0 & CR0_CD)) {
		LOG_DEBUG("caching enabled CR0 = 0x%08" PRIx32, cr0);
		if (cr0 & CR0_PG) {
			x86_32->pm_regs[I(CR0)] = cr0 & ~CR0_PG;
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("cleared paging CR0_PG = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
			/* submit wbinvd to flush cache */
			if (submit_reg_pir(t, WBINVD) != ERROR_OK)
				return ERROR_FAIL;
			x86_32->pm_regs[I(CR0)] =
				x86_32->pm_regs[I(CR0)] | (CR0_CD | CR0_NW | CR0_PG);
			if (write_hw_reg(t, CR0, x86_32->pm_regs[I(CR0)], 0) != ERROR_OK)
				return ERROR_FAIL;
			LOG_DEBUG("set CD, NW and PG, CR0 = 0x%08" PRIx32, x86_32->pm_regs[I(CR0)]);
		}
	}
	return ERROR_OK;
}
static uint8_t get_num_user_regs(struct target *t)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	return x86_32->cache->num_regs;
}
int lakemont_step(struct target *t, int current,
		uint32_t address, int handle_breakpoints)
{
	struct x86_32_common *x86_32 = target_to_x86_32(t);
	uint32_t eflags = buf_get_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32);
	uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
	uint32_t pmcr = buf_get_u32(x86_32->cache->reg_list[PMCR].value, 0, 32);
	struct breakpoint *bp = NULL;
	int retval = ERROR_OK;
	uint32_t tapstatus = 0;

	if (check_not_halted(t))
		return ERROR_TARGET_NOT_HALTED;
	bp = breakpoint_find(t, eip);
	if (retval == ERROR_OK && bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_remove_breakpoint(t, bp);
	}

	/* Set EFLAGS[TF] and PMCR[IR], exit pm and wait for PRDY# */
	LOG_DEBUG("modifying PMCR = 0x%08" PRIx32 " and EFLAGS = 0x%08" PRIx32, pmcr, eflags);
	eflags = eflags | (EFLAGS_TF | EFLAGS_RF);
	buf_set_u32(x86_32->cache->reg_list[EFLAGS].value, 0, 32, eflags);
	buf_set_u32(x86_32->cache->reg_list[PMCR].value, 0, 32, 1);
	LOG_DEBUG("EFLAGS [TF] [RF] bits set=0x%08" PRIx32 ", PMCR=0x%08" PRIx32 ", EIP=0x%08" PRIx32,
			eflags, pmcr, eip);

	tapstatus = get_tapstatus(t);

	t->debug_reason = DBG_REASON_SINGLESTEP;
	t->state = TARGET_DEBUG_RUNNING;
	if (restore_context(t) != ERROR_OK)
		return ERROR_FAIL;
	if (exit_probemode(t) != ERROR_OK)
		return ERROR_FAIL;

	target_call_event_callbacks(t, TARGET_EVENT_RESUMED);

	tapstatus = get_tapstatus(t);
	if (tapstatus & (TS_PM_BIT | TS_EN_PM_BIT | TS_PRDY_BIT | TS_PMCR_BIT)) {
		/* target has stopped */
		if (save_context(t) != ERROR_OK)
			return ERROR_FAIL;
		if (halt_prep(t) != ERROR_OK)
			return ERROR_FAIL;
		t->state = TARGET_HALTED;

		LOG_USER("step done from EIP 0x%08" PRIx32 " to 0x%08" PRIx32, eip,
				buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32));
		target_call_event_callbacks(t, TARGET_EVENT_HALTED);
	} else {
		/* target didn't stop
		 * I hope the poll() will catch it, but the deleted breakpoint is gone
		 */
		LOG_ERROR("%s target didn't stop after executing a single step", __func__);
		t->state = TARGET_RUNNING;
		return ERROR_FAIL;
	}

	/* try to re-apply the breakpoint, even if the step failed
	 * TODO: When a bp was set, we should try to stop the target - fix the return above
	 */
	if (bp != NULL/*&& bp->type == BKPT_SOFT*/) {
		/* TODO: This should only be done for software breakpoints.
		 * Stepping from hardware breakpoints should be possible with the resume flag
		 * Needs testing.
		 */
		retval = x86_32_common_add_breakpoint(t, bp);
	}
	return retval;
}
int lakemont_poll(struct target *t)
{
	/* LMT1 PMCR register currently allows code breakpoints, data breakpoints,
	 * single stepping and shutdowns to be redirected to PM but does not allow
	 * redirecting into PM as a result of SMM enter and SMM exit
	 */
	uint32_t ts = get_tapstatus(t);

	if (ts == 0xFFFFFFFF && t->state != TARGET_DEBUG_RUNNING) {
		/* something is wrong here */
		LOG_ERROR("tapstatus invalid - scan_chain serialization or locked JTAG access issues");
		/* TODO: Give a hint that unlocking is wrong or maybe a
		 * 'jtag arp_init' helps
		 */
		t->state = TARGET_DEBUG_RUNNING;
		return ERROR_OK;
	}

	if ((!(ts & TS_PM_BIT))) {
		if (t->state == TARGET_HALTED) {
			LOG_USER("target running for unknown reason");
		} else if (t->state == TARGET_DEBUG_RUNNING) {
			LOG_USER("Debug running tapstatus=0x%08" PRIx32, ts);
		}
		LOG_DEBUG("tapstatus=0x%08" PRIx32, ts);
		t->state = TARGET_RUNNING;
	}

	if (t->state == TARGET_RUNNING || t->state == TARGET_DEBUG_RUNNING) {
		if (ts & TS_PM_BIT) {
			LOG_USER("redirect to PM, tapstatus=0x%08" PRIx32, get_tapstatus(t));
			t->state = TARGET_DEBUG_RUNNING;
			if (save_context(t) != ERROR_OK)
				return ERROR_FAIL;
			if (halt_prep(t) != ERROR_OK)
				return ERROR_FAIL;
			t->state = TARGET_HALTED;
			t->debug_reason = DBG_REASON_UNDEFINED;

			struct x86_32_common *x86_32 = target_to_x86_32(t);
			uint32_t eip = buf_get_u32(x86_32->cache->reg_list[EIP].value, 0, 32);
			uint32_t dr6 = buf_get_u32(x86_32->cache->reg_list[DR6].value, 0, 32);
			uint32_t hwbreakpoint = (uint32_t)-1;

			if (dr6 & DR6_BRKDETECT_0)
				hwbreakpoint = 0;
			if (dr6 & DR6_BRKDETECT_1)
				hwbreakpoint = 1;
			if (dr6 & DR6_BRKDETECT_2)
				hwbreakpoint = 2;
			if (dr6 & DR6_BRKDETECT_3)
				hwbreakpoint = 3;

			if (hwbreakpoint != (uint32_t)-1) {
				uint32_t dr7 = buf_get_u32(x86_32->cache->reg_list[DR7].value, 0, 32);
				uint32_t type = dr7 & (0x03 << (DR7_RW_SHIFT + hwbreakpoint*DR7_RW_LEN_SIZE));
				if (type == DR7_BP_EXECUTE) {
					LOG_USER("hit hardware breakpoint (hwreg=%" PRIu32 ") at 0x%08" PRIx32,
							hwbreakpoint, eip);
				} else {
					uint32_t address = 0;
					switch (hwbreakpoint) {
					default:
					case 0:
						address = buf_get_u32(x86_32->cache->reg_list[DR0].value, 0, 32);
						break;
					case 1:
						address = buf_get_u32(x86_32->cache->reg_list[DR1].value, 0, 32);
						break;
					case 2:
						address = buf_get_u32(x86_32->cache->reg_list[DR2].value, 0, 32);
						break;
					case 3:
						address = buf_get_u32(x86_32->cache->reg_list[DR3].value, 0, 32);
						break;
					}
					LOG_USER("hit '%s' watchpoint for 0x%08" PRIx32 " (hwreg=%" PRIu32 ") at 0x%08" PRIx32,
							type == DR7_BP_WRITE ? "write" : "access",
							address, hwbreakpoint, eip);
				}
				t->debug_reason = DBG_REASON_BREAKPOINT;
			} else {
				/* Check if the target hit a software breakpoint.
				 * ! Watch out: EIP is currently pointing after the breakpoint opcode
				 */
				struct breakpoint *bp = NULL;
				bp = breakpoint_find(t, eip-1);
				if (bp != NULL) {
					t->debug_reason = DBG_REASON_BREAKPOINT;
					if (bp->type == BKPT_SOFT) {
						/* EIP is now pointing to the next byte after the
						 * breakpoint instruction. This needs to be corrected.
						 */
						buf_set_u32(x86_32->cache->reg_list[EIP].value, 0, 32, eip-1);
						x86_32->cache->reg_list[EIP].dirty = 1;
						x86_32->cache->reg_list[EIP].valid = 1;
						LOG_USER("hit software breakpoint at 0x%08" PRIx32, eip-1);
					} else {
						/* it's not a hardware breakpoint (checked already in DR6 state)
						 * and it's also not a software breakpoint ...
						 */
						LOG_USER("hit unknown breakpoint at 0x%08" PRIx32, eip);
					}
				} else {
					/* There is also the case that we hit a breakpoint instruction
					 * that was not set by us. This needs to be handled by the
					 * application that introduced the breakpoint.
					 */
					LOG_USER("unknown break reason at 0x%08" PRIx32, eip);
				}
			}
			return target_call_event_callbacks(t, TARGET_EVENT_HALTED);
		}
	}
	return ERROR_OK;
}