void mips_cm_error_report(void)
{
	u64 cm_error, cm_addr, cm_other;
	unsigned long revision;
	int ocause, cause;
	char buf[256];

	if (!mips_cm_present())
		return;

	revision = mips_cm_revision();

	if (revision < CM_REV_CM3) { /* CM2 */
		cm_error = read_gcr_error_cause();
		cm_addr = read_gcr_error_addr();
		cm_other = read_gcr_error_mult();
		cause = cm_error >> CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
		ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;

		if (!cause)
			return;

		if (cause < 16) {
			unsigned long cca_bits = (cm_error >> 15) & 7;
			unsigned long tr_bits = (cm_error >> 12) & 7;
			unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
			unsigned long stag_bits = (cm_error >> 3) & 15;
			unsigned long sport_bits = (cm_error >> 0) & 7;

			snprintf(buf, sizeof(buf),
				 "CCA=%lu TR=%s MCmd=%s STag=%lu "
				 "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
				 cm2_cmd[cmd_bits], stag_bits, sport_bits);
		} else {
void mips_cm_lock_other(unsigned int core, unsigned int vp)
{
	unsigned curr_core;
	u32 val;

	preempt_disable();
	curr_core = current_cpu_data.core;
	spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
			  per_cpu(cm_core_lock_flags, curr_core));

	if (mips_cm_revision() >= CM_REV_CM3) {
		val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
		val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
	} else {
		BUG_ON(vp != 0);
		val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
	}

	write_gcr_cl_other(val);

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
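The lock above is only useful in tandem with mips_cm_unlock_other(); the following is a minimal usage sketch of that pairing for this older two-argument API, assuming the core-other accessors generated by asm/mips-cm.h. The helper name and the particular registers written are illustrative, not taken from the code above.

/*
 * Usage sketch only (not from mips-cm.c): redirect the core-other GCR
 * window at a target core, access it, then release the redirect.
 */
#include <asm/mips-cm.h>

static void example_point_core_at_entry(unsigned int core,
					unsigned long entry)
{
	/* Redirect the core-other region at the target core, VP 0 */
	mips_cm_lock_other(core, 0);

	/* Accesses now hit the selected core's GCRs */
	write_gcr_co_reset_base(entry);	/* illustrative register choice */
	write_gcr_co_coherence(0);

	/* Restore the redirect register & re-enable preemption */
	mips_cm_unlock_other();
}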
static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}
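cps_smp_setup() only runs once the CPS SMP ops have been registered. The sketch below shows how such a setup hook is typically wired into struct plat_smp_ops; the other cps_* callbacks are assumed to be defined elsewhere in smp-cps.c and the ops table is abbreviated, not a copy of the real one.

/*
 * Sketch only: expose the setup hook through struct plat_smp_ops and
 * register it, bailing out when no Coherence Manager is present.
 */
#include <asm/mips-cm.h>
#include <asm/smp-ops.h>

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup	= cps_smp_setup,
	.boot_secondary	= cps_boot_secondary,	/* assumed, defined elsewhere */
	.init_secondary	= cps_init_secondary,	/* assumed, defined elsewhere */
	.smp_finish	= cps_smp_finish,	/* assumed, defined elsewhere */
	/* IPI and hotplug callbacks omitted for brevity */
};

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}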
int mips_cm_probe(void)
{
	phys_addr_t addr;
	u32 base_reg;
	unsigned cpu;

	/*
	 * No need to probe again if we have already been
	 * here before.
	 */
	if (mips_cm_base)
		return 0;

	addr = mips_cm_phys_base();
	BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
	if (!addr)
		return -ENODEV;

	mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
	if (!mips_cm_base)
		return -ENXIO;

	/* sanity check that we're looking at a CM */
	base_reg = read_gcr_base();
	if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
		pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
		       (unsigned long)addr);
		mips_cm_base = NULL;
		return -ENODEV;
	}

	/* set default target to memory */
	base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
	base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
	write_gcr_base(base_reg);

	/* disable CM regions */
	write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
	write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
	write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
	write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
	write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
	write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
	write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
	write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);

	/* probe for an L2-only sync region */
	mips_cm_probe_l2sync();

	/* determine register width for this CM */
	mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cm_core_lock, cpu));

	return 0;
}
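One way this probe is typically consumed during early platform bring-up is sketched below. Only mips_cm_probe(), mips_cm_present() and mips_cm_revision() come from the code above; the caller name, the ordering relative to the CPC probe and the revision decoding are assumptions for illustration.

/*
 * Illustrative caller, not from mips-cm.c: probe the CM early and
 * only touch GCRs once the probe has succeeded.
 */
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>

static void __init example_coherence_setup(void)
{
	/* Map the GCRs & set sane defaults; returns an error if no CM exists */
	if (mips_cm_probe())
		return;

	/* CPC probing (assumed here) only makes sense once the CM is mapped */
	mips_cpc_probe();

	/* Major/minor revision fields assumed to live in bits 15:8 / 7:0 */
	pr_info("CM revision %d.%d detected\n",
		(mips_cm_revision() >> 8) & 0xff,
		mips_cm_revision() & 0xff);
}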
void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			unsigned int vp, unsigned int block)
{
	unsigned int curr_core, cm_rev;
	u32 val;

	cm_rev = mips_cm_revision();
	preempt_disable();

	if (cm_rev >= CM_REV_CM3) {
		val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
		val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);

		if (cm_rev >= CM_REV_CM3_5) {
			val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
			val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
			val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
		} else {
			WARN_ON(cluster != 0);
			WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		}

		/*
		 * We need to disable interrupts in SMP systems in order to
		 * ensure that we don't interrupt the caller with code which
		 * may modify the redirect register. We do so here in a
		 * slightly obscure way by using a spin lock, since this has
		 * the neat property of also catching any nested uses of
		 * mips_cm_lock_other() leading to a deadlock or a nice warning
		 * with lockdep enabled.
		 */
		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
				  *this_cpu_ptr(&cm_core_lock_flags));
	} else {
		WARN_ON(cluster != 0);
		WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);

		/*
		 * We only have a GCR_CL_OTHER per core in systems with
		 * CM 2.5 & older, so have to ensure other VP(E)s don't
		 * race with us.
		 */
		curr_core = cpu_core(&current_cpu_data);
		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
				  per_cpu(cm_core_lock_flags, curr_core));

		val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM);
	}

	write_gcr_cl_other(val);

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
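The same lock/unlock pairing applies to this newer four-argument form, with mips_cm_unlock_other() as shown further below. In the sketch that follows, the helper name and the register read are illustrative assumptions; only the lock/unlock calls and the CM_GCR_Cx_OTHER_BLOCK_LOCAL constant come from the code above.

/*
 * Usage sketch for the four-argument variant: redirect the core-other
 * GCR block at (cluster, core, vp), access it, then unlock.
 */
#include <asm/mips-cm.h>

static u32 example_read_other_coherence(unsigned int cluster,
					unsigned int core)
{
	u32 cohctl;

	/* Select the local GCR block of the target core, VP 0 */
	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	cohctl = read_gcr_co_coherence();	/* illustrative accessor */

	/* Must always be paired with the matching unlock (see below) */
	mips_cm_unlock_other();

	return cohctl;
}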
void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	curr_core = cpu_core(&current_cpu_data);
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
void mips_cm_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() < CM_REV_CM3) {
		curr_core = current_cpu_data.core;
		spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
				       per_cpu(cm_core_lock_flags, curr_core));
	} else {
		spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
				       *this_cpu_ptr(&cm_core_lock_flags));
	}

	preempt_enable();
}
void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
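CPC access follows the same pattern as the CM core-other window. The sketch below is modelled on how smp-cps.c powers a core up; the helper name is not from the file above, and the command written is an illustrative choice of core-other CPC accessor from asm/mips-cpc.h.

/*
 * Usage sketch only: redirect the CPC core-other region, issue a
 * command to the target core's local CPC block, then unlock.
 */
#include <asm/mips-cpc.h>

static void example_cpc_power_up(unsigned int core)
{
	/* Redirect the CPC core-other region at the target core */
	mips_cpc_lock_other(core);

	/* Power-up command for that core (illustrative register/value) */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);

	/* Release the redirect & the per-core lock, re-enable preemption */
	mips_cpc_unlock_other();
}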