/*
 * Coprocessor 15 load/store (LDC/STC form) handler.
 * Decodes the instruction fields for tracing, then panics: this
 * instruction form is unimplemented in the emulator.
 */
static void op_cp15_load_store(word ins, void *data) {
    // pull the fields out of the instruction, high bits first
    int P = BIT_SET(ins, 24);
    int U = BIT_SET(ins, 23);
    int N = BIT_SET(ins, 22);
    int W = BIT_SET(ins, 21);
    int L = BIT_SET(ins, 20);
    int Rn = BITS_SHIFT(ins, 19, 16);
    int CRd = BITS_SHIFT(ins, 15, 12);
    int cp_num = BITS_SHIFT(ins, 11, 8);
    int offset_8 = BITS(ins, 7, 0);

    CPU_TRACE(5, "\t\top_cp15_load_store: cp_num %d L %d W %d N %d U %d P %d Rn %d CRd %d offset_8 %d\n",
              cp_num, L, W, N, U, P, Rn, CRd, offset_8);

    panic_cpu("op_co_load_store unimplemented\n");

    /* not reached while the panic above is in place */
    CPU_TRACE(1, "warning, ignoring coprocessor instruction\n");

#if COUNT_ARM_OPS
    inc_perf_counter(OP_COP_LOAD_STORE);
#endif
}
/*
 * Coprocessor 15 data processing (CDP form) handler.
 * Decodes the instruction fields for tracing, then panics: this
 * instruction form is unimplemented in the emulator.
 */
static void op_cp15_data_processing(word ins, void *data) {
    // pull the fields out of the instruction, high bits first
    int opcode_1 = BITS_SHIFT(ins, 23, 20);
    int CRn = BITS_SHIFT(ins, 19, 16);
    int CRd = BITS_SHIFT(ins, 15, 12);
    int cp_num = BITS_SHIFT(ins, 11, 8);
    int opcode_2 = BITS_SHIFT(ins, 7, 5);
    int CRm = BITS(ins, 3, 0);

    CPU_TRACE(5, "\t\top_cp15_data_processing: cp_num %d CRd %d CRn %d CRm %d op1 %d op2 %d\n",
              cp_num, CRd, CRn, CRm, opcode_1, opcode_2);

    panic_cpu("op_co_data_processing unimplemented\n");

    /* not reached while the panic above is in place */
    CPU_TRACE(1, "warning, ignoring coprocessor instruction\n");

#if COUNT_ARM_OPS
    inc_perf_counter(OP_COP_DATA_PROC);
#endif
}
/*
 * Coprocessor 15 double register transfer (MCRR/MRRC form) handler.
 * Decodes the instruction fields for tracing, then panics: this
 * instruction form is unimplemented in the emulator.
 */
static void op_cp15_double_reg_transfer(word ins, void *data) {
    int CRm;
    int Rn, Rd;
    int opcode;
    int cp_num;
    int L;

    // decode the fields in the instruction
    CRm = BITS(ins, 3, 0);
    opcode = BITS_SHIFT(ins, 7, 4);
    cp_num = BITS_SHIFT(ins, 11, 8);
    Rd = BITS_SHIFT(ins, 15, 12);
    Rn = BITS_SHIFT(ins, 19, 16);
    L = BIT_SET(ins, 20);

    // FIX: the original trace string had six %d conversions but passed only
    // five arguments (Rn was decoded but never passed) — a variadic
    // format/argument mismatch, which is undefined behavior. Rn is now
    // passed, and the field is labeled "Rn" since this encoding carries an
    // ARM register, not a coprocessor register.
    CPU_TRACE(5, "\t\top_cp15_double_reg_transfer: cp_num %d L %d Rd %d Rn %d CRm %d opcode %d\n",
              cp_num, L, Rd, Rn, CRm, opcode);

    panic_cpu("op_co_double_reg_transfer unimplemented\n");

    /* not reached while the panic above is in place */
    CPU_TRACE(1, "warning, ignoring coprocessor instruction\n");

#if COUNT_ARM_OPS
    inc_perf_counter(OP_COP_REG_TRANS);
#endif
}
uint32_t zynq_get_clock(enum zynq_periph periph) { DEBUG_ASSERT(periph < _PERIPH_MAX); // get the clock control register base addr_t clk_reg = periph_clk_ctrl_reg(periph); DEBUG_ASSERT(clk_reg != 0); int enable_bitpos = periph_clk_ctrl_enable_bitpos(periph); LTRACEF("clkreg 0x%x\n", *REG32(clk_reg)); // see if it's enabled if (enable_bitpos >= 0) { if ((*REG32(clk_reg) & (1 << enable_bitpos)) == 0) { // not enabled return 0; } } // get the source clock uint32_t srcclk; switch (BITS_SHIFT(*REG32(clk_reg), 5, 4)) { case 0: case 1: srcclk = get_io_pll_freq(); break; case 2: srcclk = get_arm_pll_freq(); break; case 3: srcclk = get_ddr_pll_freq(); break; } // get the divisor out of the register uint32_t divisor = BITS_SHIFT(*REG32(clk_reg), 13, 8); if (divisor == 0) return 0; uint32_t divisor2 = 1; if (periph_clk_ctrl_divisor_count(periph) == 2) { divisor2 = BITS_SHIFT(*REG32(clk_reg), 25, 20); if (divisor2 == 0) return 0; } uint32_t clk = srcclk / divisor / divisor2; return clk; }
/*
 * Walk a 2nd-level page table entry and produce the translated physical
 * address, signalling a fault on missing pages, domain failures, or
 * permission failures.
 *
 * NOTE(review): this definition is truncated in this chunk — it ends inside
 * the MANAGER-domain branch; the remainder of the body is not visible here.
 *
 *  ptable_entry       - raw 2nd-level descriptor (low 2 bits select the type)
 *  address            - virtual address being translated
 *  translated_address - out: physical address on success
 *  type               - access type, forwarded to mmu_signal_fault()
 *  write              - true for a write access (checked against READ_ONLY)
 *  priviledged        - [sic] true for a privileged-mode access
 *  domain             - domain number for the domain access check
 */
static void mmu_2nd_level_translate(unsigned int ptable_entry, armaddr_t address, armaddr_t *translated_address, enum mmu_access_type type, bool write, bool priviledged, int domain) {
    int subpage = 0;

    MMU_TRACE(7, "\t2nd level translate: ptable_entry 0x%08x\n", ptable_entry);

    // descriptor type is in the bottom two bits
    switch(ptable_entry & 0x3) {
        case 1: // large page (64KB)
            *translated_address = BITS(ptable_entry, 31, 16) | BITS(address, 15, 0);
            /* figure out which subpage we are */
            subpage = BITS_SHIFT(address, 15, 14);
            break;
        case 2: // small page (4KB)
            *translated_address = BITS(ptable_entry, 31, 12) | BITS(address, 11, 0);
            /* figure out which subpage we are */
            subpage = BITS_SHIFT(address, 11, 10);
            break;
        case 3: // tiny page (1KB)
            *translated_address = BITS(ptable_entry, 31, 10) | BITS(address, 9, 0);
            /* there's only one subpage in on a tiny page descriptor */
            subpage = 0;
            break;
        case 0: // not present
            /* page translation fault */
            mmu_signal_fault(0x7, domain, address, type);
            return;
    }

    /* domain check */
    enum mmu_domain_check_results domain_check = mmu_domain_check(domain);
    if(domain_check == DOMAIN_FAULT) {
        /* page domain fault */
        mmu_signal_fault(0xb, domain, address, type);
        return;
    } else if(domain_check == CLIENT) {
        /* load the appropriate AP bits */
        // each subpage has a 2-bit AP field starting at bit 4
        int AP = (ptable_entry >> (subpage * 2 + 4)) & 0x3;

        /* do perm check on AP bits */
        enum mmu_permission_results allowed_perms = mmu_permission_check(AP, BITS_SHIFT(mmu.flags, 9, 8), priviledged);
        if(allowed_perms == NO_ACCESS || (allowed_perms == READ_ONLY && write)) {
            /* page permission fault */
            mmu_signal_fault(0xf, domain, address, type);
            return;
        }
    } else { // domain_check == MANAGER
static uint32_t get_io_pll_freq(void) { LTRACEF("IO_PLL_CTRL 0x%x\n", SLCR_REG(IO_PLL_CTRL)); // XXX test that the pll is actually enabled uint32_t fdiv = BITS_SHIFT(SLCR_REG(IO_PLL_CTRL), 18, 12); return EXTERNAL_CLOCK_FREQ * fdiv; }
/*
 * Compute the input frequency of the cpu clock block: the PLL selected
 * by ARM_CLK_CTRL's SRCSEL field divided by its divisor field.
 *
 * Returns the frequency in Hz, or 0 if the divisor field reads 0.
 */
static uint32_t get_cpu_input_freq(void) {
    LTRACEF("ARM_CLK_CTRL 0x%x\n", SLCR_REG(ARM_CLK_CTRL));

    uint32_t divisor = BITS_SHIFT(SLCR_REG(ARM_CLK_CTRL), 13, 8);
    uint32_t srcsel = BITS_SHIFT(SLCR_REG(ARM_CLK_CTRL), 5, 4);

    uint32_t srcclk;
    switch (srcsel) {
        default:
        case 0:
        case 1: // arm pll
            srcclk = get_arm_pll_freq();
            break;
        case 2: // ddr pll
            srcclk = get_ddr_pll_freq();
            break;
        case 3: // io pll
            srcclk = get_io_pll_freq();
            break;
    }

    // FIX: the divisor field comes from hardware and may read 0; dividing
    // by it unguarded is undefined behavior. Treat it like zynq_get_clock()
    // does and report 0.
    if (divisor == 0)
        return 0;

    // cpu 6x4x
    return srcclk / divisor;
}
/*
 * Coprocessor 15 register transfer (MCR/MRC form) handler — the main cp15
 * emulation entry point. Decodes the instruction, then dispatches on CRn
 * (the cp15 register number).
 *
 * L is bit 20 of the instruction. When L is set, the instruction moves a
 * value into ARM register Rd (val is written back at loadval:, or 0 at
 * donothing: for registers we ignore); when clear, it moves get_reg(Rd)
 * into the emulated cp15 state.
 */
static void op_cp15_reg_transfer(word ins, void *data) {
    int opcode_1, opcode_2;
    int CRn, CRm;
    int Rd;
    int L;
    int cp_num;
    reg_t val;

    // decode the fields in the instruction
    CRm = BITS(ins, 3, 0);
    opcode_2 = BITS_SHIFT(ins, 7, 5);
    cp_num = BITS_SHIFT(ins, 11, 8);
    Rd = BITS_SHIFT(ins, 15, 12);
    CRn = BITS_SHIFT(ins, 19, 16);
    L = BIT_SET(ins, 20);
    opcode_1 = BITS_SHIFT(ins, 23, 21);

    CPU_TRACE(5, "\t\top_cp15_reg_transfer: cp_num %d L %d Rd %d CRn %d CRm %d op1 %d op2 %d\n",
        cp_num, L, Rd, CRn, CRm, opcode_1, opcode_2);

#if COUNT_ARM_OPS
    inc_perf_counter(OP_COP_REG_TRANS);
#endif

    /* some coprocessors we dont care about */
    switch(CRn) {
        case 0: // id register
            // read-only: writes are silently ignored
            if(L) {
                val = cp15.id;
                goto loadval;
            }
            goto done;
        case 1: // system control register
            if(L) {
                val = cp15.cr1;
                goto loadval;
            } else {
                word newval = get_reg(Rd);

                if (newval & (1<<15)) {
                    // armv5 backwards compatibility mode
                    panic_cpu("backwards compatible PC load mode not supported in armv5 (bit 15 in cr1)\n");
                }

                /* ignore all the other bits */

                /* set high/low vector base */
                // bit 13 (V) selects the high exception vector base
                set_exception_base((newval & (1<<13)) ? 0xffff0000 : 0);

                /* load the potentially new mmu config */
                mmu_set_flags(newval & MMU_FLAG_MASK);

                /* save our config */
                cp15.cr1 = newval;
                goto done;
            }
        case 2: // translation table base
            if(L) {
                val = mmu_get_register(MMU_TRANS_TABLE_REG);
                goto loadval;
            } else {
                mmu_set_register(MMU_TRANS_TABLE_REG, get_reg(Rd));
                goto done;
            }
        case 3: // domain access control
            if(L) {
                val = mmu_get_register(MMU_DOMAIN_ACCESS_CONTROL_REG);
                goto loadval;
            } else {
                mmu_set_register(MMU_DOMAIN_ACCESS_CONTROL_REG, get_reg(Rd));
                goto done;
            }
        case 4: // unpredictable
            goto donothing;
        case 5: // fault state register
            if(L) {
                val = mmu_get_register(MMU_FAULT_STATUS_REG);
                goto loadval;
            } else {
                mmu_set_register(MMU_FAULT_STATUS_REG, get_reg(Rd));
                goto done;
            }
        case 6: // fault address register
            if(L) {
                val = mmu_get_register(MMU_FAULT_ADDRESS_REG);
                goto loadval;
            } else {
                mmu_set_register(MMU_FAULT_ADDRESS_REG, get_reg(Rd));
                goto done;
            }
        case 7: // cache registers, we only sort of emulate the instruction cache
            CPU_TRACE(5, "cache instruction: L %d m %d n %d d %d op1 %d op2 %d\n", L, CRm, CRn, Rd, opcode_1, opcode_2);
            if (!L) {
                // CRm selects the cache maintenance operation
                switch(CRm) {
                    case 0: // wait for interrupt
                        goto done;
                    case 5: // various forms of ICache invalidation
                    case 7: // invalidate Icache + Dcache
                        // the emulator's translated code pages stand in for the icache
                        flush_all_codepages();
                        goto done;
                    case 6: // invalidate dcache
                        goto done; // no dcache emulation, nothing to do
                    case 10: // clean dcache & drain write buffer
                        goto done;
                    case 13: // prefetch icache
                        goto done;
                    case 14: // clean and invalidate dcache
                        goto done;
                }
                // NOTE(review): unlisted CRm values fall through to case 8 below;
                // verify that is intended rather than a missing goto
            } else { // store
                switch(CRm) {
                    case 14: // clean and invalidate dcache
                    case 10: // clean dcache & drain write buffer
                        // arm926 special cache routine that makes it easy to clear the entire cache
                        // since it doesn't really do anything, probably okay to leave it in for all cores
                        if (opcode_2 == 3) {
                            // test and clean, side effect is to set the NZ condition
                            val = 0; // (1<<30);
                            set_NZ_condition(val);
                            goto loadval;
                        }
                }
                goto donothing;
            }
        case 8: // tlb flush
            if (L) {
                switch(CRm) {
                    case 7: // unified TLB
                    case 5: // instruction TLB
                        flush_all_codepages();
                        // fall through
                    case 6: // data TLB
                        mmu_invalidate_tcache();
                        goto done;
                }
            }
            goto donothing;
        case 9: // cache lockdown
            goto donothing;
        case 10: // tlb lockdown
            goto donothing;
        case 11: case 12: // unpredictable
            goto donothing;
        case 13: // process id register
            if(L) {
                val = cp15.process_id;
                goto loadval;
            } else {
                cp15.process_id = get_reg(Rd);
                goto done;
            }
        case 14: // reserved
            goto donothing;
        case 15: // test and debug
            goto donothing;
        default:
            goto donothing;
    }

loadval:
    // write the result into Rd; loads to r15 (pc) are dropped
    if(L && Rd != 15)
        put_reg(Rd, val);
    goto done;

unsupported:
    // NOTE(review): no goto targets this label — it is dead code and will
    // draw an unused-label warning; either wire it up or remove it
    panic_cpu("reading from or writing to unsupported cp15 control register!\n");

donothing:
    // ignored register: reads return 0, writes are discarded
    if(L && Rd != 15) {
        put_reg(Rd, 0);
    }

done:
    ;
}