void octeon_check_cpu_bist(void) { const int coreid = octeon_get_core_num(); uint64_t mask; uint64_t bist_val; /* Check BIST results for COP0 registers */ mask = 0x1f00000000ull; bist_val = __read_64bit_c0_register($27,0); if (bist_val & mask) printk("Core%d BIST Failure: CacheErr(icache) = 0x%lx\n", coreid, bist_val); bist_val = __read_64bit_c0_register($27,1); if (bist_val & 1) printk("Core%d L1 Dcache parity error: CacheErr(dcache) = 0x%lx\n", coreid, bist_val); mask = 0xfc00000000000000ull; bist_val = __read_64bit_c0_register($11,7); if (bist_val & mask) printk("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%lx\n", coreid, bist_val); __write_64bit_c0_register($27,1,0); mask = 0x18ull; bist_val = octeon_read_csr(OCTEON_L2D_ERR); octeon_write_csr(OCTEON_L2D_ERR, mask); /* Clear error bits */ if (bist_val & mask) printk("Core%d L2 Parity error: L2D_ERR = 0x%lx\n", coreid, bist_val); }
/**
 * Snapshot both hardware performance counters for the calling core.
 *
 * Stores the current values of COP0 $25 selects 1 and 3 into the
 * per-CPU slots of proc_perf_counter_data, then issues a memory
 * barrier so the new samples are visible to readers on other cores.
 *
 * @param arg  Unused.
 */
static void proc_perf_update_counters(void *arg)
{
	const int core = smp_processor_id();

	proc_perf_counter_data[core][0] = __read_64bit_c0_register($25, 1);
	proc_perf_counter_data[core][1] = __read_64bit_c0_register($25, 3);
	mb();	/* publish the samples before returning */
}
/*
 * Address-error exception handler.  NOTE(review): only the Octeon CVMSEG
 * fast path is visible in this chunk -- the matching #endif and the rest
 * of the handler lie outside this view.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;
#if defined(CONFIG_CPU_CAVIUM_OCTEON) && (CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0)
	/*
	 * Allows tasks to access CVMSEG addresses. These are special
	 * addresses into the Octeon L1 Cache that can be used as fast
	 * scratch memory. By default access to this memory is
	 * disabled so we don't have to save it on context
	 * switch. When a userspace task references one of these
	 * addresses, we enable the region and size it to match the
	 * app.
	 */
	const unsigned long CVMSEG_BASE = 0xffffffffffff8000ul;
	const unsigned long CVMSEG_IO = 0xffffffffffffa200ul;
	u64 cvmmemctl = __read_64bit_c0_register($11, 7);
	/* Low 6 bits of CVMMEMCTL give the CVMSEG size in 128-byte lines */
	unsigned long cvmseg_size = (cvmmemctl & 0x3f) * 128;

	/* Fault address must be the CVMSEG IO word or within the sized
	   CVMSEG region for the fast path to apply */
	if ((regs->cp0_badvaddr == CVMSEG_IO) ||
	    ((regs->cp0_badvaddr >= CVMSEG_BASE) &&
	     (regs->cp0_badvaddr < CVMSEG_BASE + cvmseg_size))) {
		preempt_disable();
		/* Re-read under preempt-off in case we migrated cores */
		cvmmemctl = __read_64bit_c0_register($11, 7);
		/* Make sure all async operations are done */
		asm volatile ("synciobdma" ::: "memory");
		/* Enable userspace access to CVMSEG */
		cvmmemctl |= 1 << 6;
		__write_64bit_c0_register($11, 7, cvmmemctl);
# ifdef CONFIG_FAST_ACCESS_TO_THREAD_POINTER
		/*
		 * Restore the processes CVMSEG data. Leave off the
		 * last 8 bytes since the kernel stores the thread
		 * pointer there.
		 */
		memcpy((void *)CVMSEG_BASE, current->thread.cvmseg.cvmseg,
		       cvmseg_size - 8);
# else
		/* Restore the processes CVMSEG data */
		memcpy((void *)CVMSEG_BASE, current->thread.cvmseg.cvmseg,
		       cvmseg_size);
# endif
		preempt_enable();
		return;
	}
/**
 * Interrupt handler for the Octeon performance counters.
 *
 * For each of the two counters (COP0 $25 selects 1 and 3) whose bit 63
 * is set, record an oprofile sample for the corresponding event and
 * reload the counter with its configured reset value.
 *
 * @param irq     Interrupt number (unused).
 * @param dev_id  Device cookie (unused).
 * @param regs    Register state at the time of the interrupt.
 *
 * @return IRQ_HANDLED in all cases.
 */
static irqreturn_t octeon_perfcount_handler(int irq, void *dev_id,
					    struct pt_regs *regs)
{
	const uint64_t overflow_bit = 1ull << 63;
	uint64_t value;

	value = __read_64bit_c0_register($25, 1);
	if (value & overflow_bit) {
		oprofile_add_sample(regs, 0);
		__write_64bit_c0_register($25, 1, octeon_config.reset_value[0]);
	}

	value = __read_64bit_c0_register($25, 3);
	if (value & overflow_bit) {
		oprofile_add_sample(regs, 1);
		__write_64bit_c0_register($25, 3, octeon_config.reset_value[1]);
	}

	return IRQ_HANDLED;
}
/** * * @return */ void octeon_user_io_init(void) { octeon_cvmemctl_t cvmmemctl; octeon_iob_fau_timeout_t fau_timeout; octeon_pow_nw_tim_t nm_tim; /* Get the current settings for CP0_CVMMEMCTL_REG */ cvmmemctl.u64 = __read_64bit_c0_register($11, 7); cvmmemctl.s.dismarkwblongto = 0; /**< R/W If set, marked write-buffer entries time out the same as as other entries; if clear, marked write-buffer entries use the maximum timeout. */ cvmmemctl.s.dismrgclrwbto = 0; /**< R/W If set, a merged store does not clear the write-buffer entry timeout state. */ cvmmemctl.s.iobdmascrmsb = 0; /**< R/W Two bits that are the MSBs of the resultant CVMSEG LM word location for an IOBDMA. The other 8 bits come from the SCRADDR field of the IOBDMA. */ cvmmemctl.s.syncwsmarked = 0; /**< R/W If set, SYNCWS and SYNCS only order marked stores; if clear, SYNCWS and SYNCS only order unmarked stores. SYNCWSMARKED has no effect when DISSYNCWS is set. */ cvmmemctl.s.dissyncws = 0; /**< R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */ if (octeon_is_pass1()) cvmmemctl.s.diswbfst = 0; /**< R/W If set, no stall happens on write buffer full. */ else cvmmemctl.s.diswbfst = 1; /**< R/W If set, no stall happens on write buffer full. */ cvmmemctl.s.xkmemenas = 0; /**< R/W If set (and SX set), supervisor-level loads/stores can use XKPHYS addresses with VA<48>==0 */ #ifdef CONFIG_CAVIUM_OCTEON_USER_MEM cvmmemctl.s.xkmemenau = 1; /**< R/W If set (and UX set), user-level loads/stores can use XKPHYS addresses with VA<48>==0 */ #else cvmmemctl.s.xkmemenau = 0; #endif cvmmemctl.s.xkioenas = 0; /**< R/W If set (and SX set), supervisor-level loads/stores can use XKPHYS addresses with VA<48>==1 */ cvmmemctl.s.xkioenau = 1; /**< R/W If set (and UX set), user-level loads/stores can use XKPHYS addresses with VA<48>==1 */ cvmmemctl.s.allsyncw = 0; /**< R/W If set, all stores act as SYNCW (NOMERGE must be set when this is set) RW, reset to 0. 
*/ cvmmemctl.s.nomerge = 0; /**< R/W If set, no stores merge, and all stores reach the coherent bus in order. */ cvmmemctl.s.didtto = 0; /**< R/W Selects the bit in the counter used for DID time-outs 0 = 231, 1 = 230, 2 = 229, 3 = 214. Actual time-out is between 1× and 2× this interval. For example, with DIDTTO=3, expiration interval is between 16K and 32K. */ cvmmemctl.s.csrckalwys = 0; /**< R/W If set, the (mem) CSR clock never turns off. */ cvmmemctl.s.mclkalwys = 0; /**< R/W If set, mclk never turns off. */ cvmmemctl.s.wbfltime = 0; /**< R/W Selects the bit in the counter used for write buffer flush time-outs (WBFLT+11) is the bit position in an internal counter used to determine expiration. The write buffer expires between 1× and 2× this interval. For example, with WBFLT = 0, a write buffer expires between 2K and 4K cycles after the write buffer entry is allocated. */ cvmmemctl.s.istrnol2 = 0; /**< R/W If set, do not put Istream in the L2 cache. */ cvmmemctl.s.wbthresh = 10; /**< R/W The write buffer threshold. */ cvmmemctl.s.cvmsegenak = 1; /**< R/W If set, CVMSEG is available for loads/stores in kernel/debug mode. */ cvmmemctl.s.cvmsegenas = 0; /**< R/W If set, CVMSEG is available for loads/stores in supervisor mode. */ cvmmemctl.s.cvmsegenau = 0; /**< R/W If set, CVMSEG is available for loads/stores in user mode. */ cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; /**< R/W Size of local memory in cache blocks, 54 (6912 bytes) is max legal value. */ if (smp_processor_id() == 0) printk("CVMSEG size: %d cache lines (%d bytes)\n", CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128); __write_64bit_c0_register($11, 7, cvmmemctl.u64); /* Set a default for the hardware timeouts */ fau_timeout.u64 = 0; fau_timeout.s.tout_enb = 1; fau_timeout.s.tout_val = 16; /* 4096 cycles */ octeon_write_csr(OCTEON_IOB_FAU_TIMEOUT, fau_timeout.u64); nm_tim.u64 = 0; nm_tim.s.nw_tim = 3; /* 4096 cycles */ octeon_write_csr(OCTEON_POW_NW_TIM, nm_tim.u64); }