/*
 * Record the outer (L2C-310 style) cache controller for @target and, on an
 * SMP target, share the same descriptor with every other core in the
 * cluster.  Must be called after the SMP topology has been declared.
 *
 * @param target the target owning the cache
 * @param base   physical base address of the L2 cache controller
 * @param way    number of cache ways (used later to build way masks)
 * @return ERROR_OK on success; ERROR_FAIL if any core already has an outer
 *         cache configured or if allocation fails
 */
static int armv7a_l2x_cache_init(struct target *target, uint32_t base,
	uint32_t way)
{
	struct armv7a_l2x_cache *l2x_cache;
	struct target_list *head = target->head;
	struct target *curr;

	struct armv7a_common *armv7a = target_to_armv7a(target);
	if (armv7a->armv7a_mmu.armv7a_cache.outer_cache) {
		/* LOG_ERROR terminates the line itself; no "\n" in the message */
		LOG_ERROR("L2 cache was already initialised");
		return ERROR_FAIL;
	}

	l2x_cache = calloc(1, sizeof(struct armv7a_l2x_cache));
	if (!l2x_cache) {
		/* calloc can fail; dereferencing NULL below would crash */
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}
	l2x_cache->base = base;
	l2x_cache->way = way;
	armv7a->armv7a_mmu.armv7a_cache.outer_cache = l2x_cache;

	/* initialize all targets in this cluster (smp target)
	 * l2 cache must be configured after smp declaration
	 */
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if (curr != target) {
			armv7a = target_to_armv7a(curr);
			if (armv7a->armv7a_mmu.armv7a_cache.outer_cache) {
				LOG_ERROR("smp target : cache l2 already initialized");
				return ERROR_FAIL;
			}
			/* all cores share the single descriptor allocated above */
			armv7a->armv7a_mmu.armv7a_cache.outer_cache = l2x_cache;
		}
		head = head->next;
	}
	return ERROR_OK;
}
static int armv7a_l1_d_cache_clean_inval_all(struct target *target) { struct armv7a_common *armv7a = target_to_armv7a(target); struct armv7a_cache_common *cache = &(armv7a->armv7a_mmu.armv7a_cache); struct arm_dpm *dpm = armv7a->arm.dpm; int cl; int retval; retval = armv7a_l1_d_cache_sanity_check(target); if (retval != ERROR_OK) return retval; retval = dpm->prepare(dpm); if (retval != ERROR_OK) goto done; for (cl = 0; cl < cache->loc; cl++) { /* skip i-only caches */ if (cache->arch[cl].ctype < CACHE_LEVEL_HAS_D_CACHE) continue; armv7a_l1_d_cache_flush_level(dpm, &cache->arch[cl].d_u_size, cl); } retval = dpm->finish(dpm); return retval; done: LOG_ERROR("clean invalidate failed"); dpm->finish(dpm); return retval; }
int armv7a_cache_auto_flush_all_data(struct target *target) { int retval = ERROR_FAIL; struct armv7a_common *armv7a = target_to_armv7a(target); if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) return ERROR_OK; if (target->smp) { struct target_list *head; struct target *curr; head = target->head; while (head != (struct target_list *)NULL) { curr = head->target; if (curr->state == TARGET_HALTED) retval = armv7a_l1_d_cache_clean_inval_all(curr); head = head->next; } } else retval = armv7a_l1_d_cache_clean_inval_all(target); /* do outer cache flushing after inner caches have been flushed */ retval = arm7a_l2x_flush_all_data(target); return retval; }
/*
 * Clean and invalidate the outer (L2) cache lines covering the virtual
 * address range [virt, virt + size).  Each line's VA is translated to a
 * physical address and written to the controller's clean+invalidate-by-PA
 * register.
 *
 * @param target target; must be halted with an L2 cache configured
 * @param virt   virtual start address
 * @param size   length of the range in bytes
 * @return ERROR_OK on success, an error code otherwise
 */
int armv7a_l2x_cache_flush_virt(struct target *target, target_addr_t virt,
	uint32_t size)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *)
		(armv7a->armv7a_mmu.armv7a_cache.outer_cache);
	/* FIXME: different controllers have different linelen? */
	uint32_t linelen = 32;
	target_addr_t va, va_end;
	int retval;

	retval = arm7a_l2x_sanity_check(target);
	if (retval)
		return retval;

	if (size == 0)
		return retval;

	/* Align the start down to a line boundary; otherwise an unaligned
	 * range could leave its final bytes in a line that is never flushed
	 * (e.g. virt=0x1c, size=64, linelen=32 previously missed line 0x40).
	 */
	va = virt & ~(target_addr_t)(linelen - 1);
	va_end = virt + size;

	for (; va < va_end; va += linelen) {
		target_addr_t pa;

		/* FIXME: use less verbose virt2phys? */
		retval = target->type->virt2phys(target, va, &pa);
		if (retval != ERROR_OK)
			goto done;

		retval = target_write_phys_u32(target,
			l2x_cache->base + L2X0_CLEAN_INV_LINE_PA, pa);
		if (retval != ERROR_OK)
			goto done;
	}
	return retval;

done:
	LOG_ERROR("d-cache invalidate failed");

	return retval;
}
/*
 * Invalidate the L1 data cache lines covering the virtual address range
 * [virt, virt + size) by VA to PoC (DCIMVAC).  A line only partially
 * covered at the start or end of the range is cleaned+invalidated
 * (DCCIMVAC) instead, so unrelated data sharing that line is written back
 * rather than destroyed.
 *
 * @param target target; must be halted with the d-cache enabled
 * @param virt   virtual start address of the range
 * @param size   length of the range in bytes
 * @return ERROR_OK on success, an error code otherwise
 */
int armv7a_l1_d_cache_inval_virt(struct target *target, uint32_t virt,
	uint32_t size)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	struct armv7a_cache_common *armv7a_cache =
		&armv7a->armv7a_mmu.armv7a_cache;
	uint32_t linelen = armv7a_cache->dminline;
	uint32_t va_line, va_end;
	int retval;

	retval = armv7a_l1_d_cache_sanity_check(target);
	if (retval != ERROR_OK)
		return retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* round start down to a line boundary; (-linelen) == ~(linelen - 1)
	 * for the power-of-two line sizes dminline encodes */
	va_line = virt & (-linelen);
	va_end = virt + size;

	/* handle unaligned start */
	if (virt != va_line) {
		/* DCCIMVAC: clean+invalidate the partially covered head line */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MCR(15, 0, 0, 7, 14, 1), va_line);
		if (retval != ERROR_OK)
			goto done;
		/* head line dealt with; start the plain-invalidate loop after it */
		va_line += linelen;
	}

	/* handle unaligned end */
	if ((va_end & (linelen-1)) != 0) {
		/* drop the partial tail from the loop range ... */
		va_end &= (-linelen);
		/* DCCIMVAC: ... and clean+invalidate that tail line instead */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MCR(15, 0, 0, 7, 14, 1), va_end);
		if (retval != ERROR_OK)
			goto done;
	}

	/* fully covered lines can simply be invalidated */
	while (va_line < va_end) {
		/* DCIMVAC - Invalidate data cache line by VA to PoC. */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MCR(15, 0, 0, 7, 6, 1), va_line);
		if (retval != ERROR_OK)
			goto done;
		va_line += linelen;
	}

	dpm->finish(dpm);
	return retval;

done:
	LOG_ERROR("d-cache invalidate failed");
	dpm->finish(dpm);
	return retval;
}
/*
 * We assume that target core was chosen correctly. It means if same data
 * was handled by two cores, the other core will lose the changes. Since it
 * is impossible to know (FIXME) which core has correct data, keep in mind
 * that some kind of data loss or corruption is possible.
 * Possible scenario:
 * - core1 loaded and changed data on 0x12345678
 * - we halted target and modified same data on core0
 * - data on core1 will be lost.
 */
int armv7a_cache_auto_flush_on_write(struct target *target, uint32_t virt,
	uint32_t size)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* no-op unless automatic cache maintenance was enabled by the user */
	if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled)
		return ERROR_OK;

	return armv7a_cache_flush_virt(target, virt, size);
}
/*
 * Preconditions for i-cache maintenance: the target must be halted and the
 * L1 instruction cache must be enabled.
 */
static int armv7a_l1_i_cache_sanity_check(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_NOT_HALTED;
	}

	/* check that cache data is on at target halt */
	if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled)
		return ERROR_OK;

	LOG_DEBUG("instruction cache is not enabled");
	return ERROR_TARGET_INVALID;
}
/* * clean and invalidate complete l2x cache */ int arm7a_l2x_flush_all_data(struct target *target) { struct armv7a_common *armv7a = target_to_armv7a(target); struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *) (armv7a->armv7a_mmu.armv7a_cache.outer_cache); uint32_t l2_way_val; int retval; retval = arm7a_l2x_sanity_check(target); if (retval) return retval; l2_way_val = (1 << l2x_cache->way) - 1; return target_write_phys_u32(target, l2x_cache->base + L2X0_CLEAN_INV_WAY, l2_way_val); }
static int arm7a_l2x_sanity_check(struct target *target) { struct armv7a_common *armv7a = target_to_armv7a(target); struct armv7a_l2x_cache *l2x_cache = (struct armv7a_l2x_cache *) (armv7a->armv7a_mmu.armv7a_cache.outer_cache); if (target->state != TARGET_HALTED) { LOG_ERROR("%s: target not halted", __func__); return ERROR_TARGET_NOT_HALTED; } if (!l2x_cache || !l2x_cache->base) { LOG_DEBUG("l2x is not configured!"); return ERROR_FAIL; } return ERROR_OK; }
/*
 * Write the result of a completed semihosting call back into the target's
 * register cache so execution can resume past the trap: R0 receives the
 * result value, PC is loaded from LR, and the saved PSR is restored as the
 * current PSR (for ARM7/9 and ARMv7-A); on other cores only R0 is set.
 */
static int post_result(struct target *target)
{
	struct arm *arm = target_to_arm(target);

	/* REVISIT this looks wrong ... ARM11 and Cortex-A8
	 * should work this way at least sometimes.
	 */
	if (is_arm7_9(target_to_arm7_9(target)) ||
	    is_armv7a(target_to_armv7a(target))) {
		uint32_t spsr;

		/* return value in R0 */
		buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32,
			arm->semihosting_result);
		arm->core_cache->reg_list[0].dirty = 1;

		/* LR --> PC */
		buf_set_u32(arm->core_cache->reg_list[15].value, 0, 32,
			buf_get_u32(arm_reg_current(arm, 14)->value, 0, 32));
		arm->core_cache->reg_list[15].dirty = 1;

		/* saved PSR --> current PSR */
		spsr = buf_get_u32(arm->spsr->value, 0, 32);

		/* REVISIT should this be arm_set_cpsr(arm, spsr)
		 * instead of a partially unrolled version?
		 */
		buf_set_u32(arm->cpsr->value, 0, 32, spsr);
		arm->cpsr->dirty = 1;
		/* mode bits M[4:0] of the restored PSR */
		arm->core_mode = spsr & 0x1f;
		/* T bit set: the interrupted code was Thumb */
		if (spsr & 0x20)
			arm->core_state = ARM_STATE_THUMB;

	} else {
		/* resume execution, this will be pc+2 to skip over the
		 * bkpt instruction */

		/* return result in R0 */
		buf_set_u32(arm->core_cache->reg_list[0].value, 0, 32,
			arm->semihosting_result);
		arm->core_cache->reg_list[0].dirty = 1;
	}

	return ERROR_OK;
}
/*
 * Invalidate the L1 instruction cache lines (ICIMVAU) and the branch
 * predictor entries (BPIMVA) covering the virtual address range
 * [virt, virt + size).
 *
 * @param target target; must be halted with the i-cache enabled
 * @param virt   virtual start address of the range
 * @param size   length of the range in bytes
 * @return ERROR_OK on success, an error code otherwise
 */
int armv7a_l1_i_cache_inval_virt(struct target *target, uint32_t virt,
	uint32_t size)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	struct armv7a_cache_common *armv7a_cache =
		&armv7a->armv7a_mmu.armv7a_cache;
	uint32_t linelen = armv7a_cache->iminline;
	uint32_t va_line, va_end;
	int retval;

	retval = armv7a_l1_i_cache_sanity_check(target);
	if (retval != ERROR_OK)
		return retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* round start down to a line boundary */
	va_line = virt & (-linelen);
	va_end = virt + size;

	while (va_line < va_end) {
		/* ICIMVAU - Invalidate instruction cache by VA to PoU. */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MCR(15, 0, 0, 7, 5, 1), va_line);
		if (retval != ERROR_OK)
			goto done;
		/* BPIMVA - invalidate branch predictor entry for this VA */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MCR(15, 0, 0, 7, 5, 7), va_line);
		if (retval != ERROR_OK)
			goto done;
		va_line += linelen;
	}

	/* balance dpm->prepare(): the success path previously returned
	 * without calling finish, unlike every sibling cache routine */
	dpm->finish(dpm);
	return retval;

done:
	LOG_ERROR("i-cache invalidate failed");
	dpm->finish(dpm);
	return retval;
}
/*
 * Clean and invalidate (DCCIMVAC) the L1 data cache lines spanning the
 * virtual address range [virt, virt + size).
 */
int armv7a_l1_d_cache_flush_virt(struct target *target, uint32_t virt,
	unsigned int size)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	struct armv7a_cache_common *armv7a_cache =
		&armv7a->armv7a_mmu.armv7a_cache;
	uint32_t linelen = armv7a_cache->dminline;
	uint32_t addr, end;
	int retval;

	retval = armv7a_l1_d_cache_sanity_check(target);
	if (retval != ERROR_OK)
		return retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	end = virt + size;

	/* walk the range one line at a time, starting from the aligned
	 * address of the line containing virt */
	for (addr = virt & (-linelen); addr < end; addr += linelen) {
		/* DCCIMVAC - clean and invalidate line by VA to PoC */
		retval = dpm->instr_write_data_r0(dpm,
				ARMV4_5_MCR(15, 0, 0, 7, 14, 1), addr);
		if (retval != ERROR_OK)
			goto done;
	}

	dpm->finish(dpm);
	return retval;

done:
	LOG_ERROR("d-cache invalidate failed");
	dpm->finish(dpm);
	return retval;
}
/*
 * Invalidate the entire L1 instruction cache; on SMP targets the
 * inner-shareable variant broadcasts the invalidation to all cores.
 */
int armv7a_l1_i_cache_inval_all(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm_dpm *dpm = armv7a->arm.dpm;
	int retval;

	retval = armv7a_l1_i_cache_sanity_check(target);
	if (retval != ERROR_OK)
		return retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/* choose the broadcast variant for SMP clusters */
	retval = dpm->instr_write_data_r0(dpm,
			target->smp
				? ARMV4_5_MCR(15, 0, 0, 7, 1, 0)	/* ICIALLUIS */
				: ARMV4_5_MCR(15, 0, 0, 7, 5, 0),	/* ICIALLU */
			0);
	if (retval != ERROR_OK)
		goto done;

	dpm->finish(dpm);
	return retval;

done:
	LOG_ERROR("i-cache invalidate failed");
	dpm->finish(dpm);
	return retval;
}
/**
 * Checks for and processes an ARM semihosting request. This is meant
 * to be called when the target is stopped due to a debug mode entry.
 * If the value 0 is returned then there was nothing to process. A non-zero
 * return value signifies that a request was processed and the target resumed,
 * or an error was encountered, in which case the caller must return
 * immediately.
 *
 * @param target Pointer to the ARM target to process. This target must
 * not represent an ARMv6-M or ARMv7-M processor.
 * @param retval Pointer to a location where the return code will be stored
 * @return non-zero value if a request was processed or an error encountered
 */
int arm_semihosting(struct target *target, int *retval)
{
	struct arm *arm = target_to_arm(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t pc, lr, spsr;
	struct reg *r;

	/* nothing to do unless the user enabled semihosting */
	if (!arm->is_semihosting)
		return 0;

	if (is_arm7_9(target_to_arm7_9(target)) ||
	    is_armv7a(armv7a)) {
		uint32_t vbar = 0x00000000;

		/* semihosting SVC traps are taken in Supervisor mode */
		if (arm->core_mode != ARM_MODE_SVC)
			return 0;

		if (is_armv7a(armv7a)) {
			struct arm_dpm *dpm = armv7a->arm.dpm;

			/* read VBAR so a relocated vector table is honoured */
			*retval = dpm->prepare(dpm);
			if (*retval == ERROR_OK) {
				*retval = dpm->instr_read_data_r0(dpm,
					ARMV4_5_MRC(15, 0, 0, 12, 0, 0),
					&vbar);
				dpm->finish(dpm);
				if (*retval != ERROR_OK)
					return 1;
			} else {
				return 1;
			}
		}

		/* Check for PC == 0x00000008 or 0xffff0008: Supervisor Call vector. */
		r = arm->pc;
		pc = buf_get_u32(r->value, 0, 32);
		if (pc != (vbar + 0x00000008) && pc != 0xffff0008)
			return 0;

		/* LR holds the return address just past the trapping SVC */
		r = arm_reg_current(arm, 14);
		lr = buf_get_u32(r->value, 0, 32);

		/* Core-specific code should make sure SPSR is retrieved
		 * when the above checks pass...
		 */
		if (!arm->spsr->valid) {
			LOG_ERROR("SPSR not valid!");
			*retval = ERROR_FAIL;
			return 1;
		}

		spsr = buf_get_u32(arm->spsr->value, 0, 32);

		/* check instruction that triggered this trap */
		if (spsr & (1 << 5)) {
			/* was in Thumb (or ThumbEE) mode */
			uint8_t insn_buf[2];
			uint16_t insn;

			/* the SVC is the 2-byte instruction before LR */
			*retval = target_read_memory(target, lr-2, 2, 1, insn_buf);
			if (*retval != ERROR_OK)
				return 1;

			insn = target_buffer_get_u16(target, insn_buf);

			/* SVC 0xab */
			if (insn != 0xDFAB)
				return 0;
		} else if (spsr & (1 << 24)) {
			/* was in Jazelle mode */
			return 0;
		} else {
			/* was in ARM mode */
			uint8_t insn_buf[4];
			uint32_t insn;

			/* the SVC is the 4-byte instruction before LR */
			*retval = target_read_memory(target, lr-4, 4, 1, insn_buf);
			if (*retval != ERROR_OK)
				return 1;

			insn = target_buffer_get_u32(target, insn_buf);

			/* SVC 0x123456 */
			if (insn != 0xEF123456)
				return 0;
		}
	} else if (is_armv7m(target_to_armv7m(target))) {
		uint16_t insn;

		if (target->debug_reason != DBG_REASON_BREAKPOINT)
			return 0;

		r = arm->pc;
		pc = buf_get_u32(r->value, 0, 32);

		/* clear the Thumb bit before reading the instruction */
		pc &= ~1;
		*retval = target_read_u16(target, pc, &insn);
		if (*retval != ERROR_OK)
			return 1;

		/* bkpt 0xAB */
		if (insn != 0xBEAB)
			return 0;
	} else {
		LOG_ERROR("Unsupported semi-hosting Target");
		return 0;
	}

	/* Perform semihosting if we are not waiting on a fileio
	 * operation to complete.
	 */
	if (!arm->semihosting_hit_fileio) {
		*retval = do_semihosting(target);
		if (*retval != ERROR_OK) {
			LOG_ERROR("Failed semihosting operation");
			return 0;
		}
	}

	/* Post result to target if we are not waiting on a fileio
	 * operation to complete:
	 */
	if (!arm->semihosting_hit_fileio) {
		*retval = post_result(target);
		if (*retval != ERROR_OK) {
			LOG_ERROR("Failed to post semihosting result");
			return 0;
		}

		/* restart the target past the trapping instruction */
		*retval = target_resume(target, 1, 0, 0, 0);
		if (*retval != ERROR_OK) {
			LOG_ERROR("Failed to resume target");
			return 0;
		}

		return 1;
	}

	return 0;
}