/* Enable or disable the overflow interrupt for event counter @n. */
static void mon_irq_enable(int n, bool en)
{
	/* SET and CLR are separate write-only registers; pick one. */
	set_l2_indirect_reg(en ? L2PMINTENSET : L2PMINTENCLR, BIT(n));
}
static void stop_monitoring(void) { global_mon_enable(false); mon_disable(0); mon_disable(1); set_l2_indirect_reg(L2PMINTENCLR, BIT(0)); set_l2_indirect_reg(L2PMINTENCLR, BIT(1)); disable_irq(MON_INT); free_irq(MON_INT, mon_intr_handler); cancel_delayed_work_sync(&bw_sample); destroy_workqueue(bw_sample_wq); bw_levels[0].vectors[0].ib = 0; bw_levels[0].vectors[0].ab = 0; bw_levels[0].vectors[1].ib = 0; bw_levels[0].vectors[1].ab = 0; bw_levels[1].vectors[0].ib = 0; bw_levels[1].vectors[0].ab = 0; bw_levels[1].vectors[1].ib = 0; bw_levels[1].vectors[1].ab = 0; msm_bus_scale_unregister_client(bus_client); }
/*
 * Enable event counter @n.  The overflow status bit is cleared first so
 * a stale overflow from a previous run cannot be misread once the
 * counter is running.
 */
static void mon_enable(int n)
{
	/* Write-1-to-clear the pending overflow flag for counter n. */
	set_l2_indirect_reg(L2PMOVSR, BIT(n));
	/* Then turn the counter on via the enable-set register. */
	set_l2_indirect_reg(L2PMCNTENSET, BIT(n));
}
static void mon_enable(int n) { /* Clear previous overflow state for event counter n */ set_l2_indirect_reg(L2PMOVSR, BIT(n)); /* Enable event counter n */ set_l2_indirect_reg(L2PMCNTENSET, BIT(n)); }
/* Returns MBps of read/writes for the sampling window. */ static int mon_get_mbps(int n, u32 start_val, unsigned int us) { u32 overflow, count; long long beats; count = get_l2_indirect_reg(L2PMnEVCNTR(n)); overflow = get_l2_indirect_reg(L2PMOVSR); if (overflow & BIT(n)) beats = 0xFFFFFFFF - start_val + count; else beats = count - start_val; beats *= USEC_PER_SEC; beats *= bytes_per_beat; do_div(beats, us); beats = DIV_ROUND_UP_ULL(beats, MBYTE); pr_debug("EV%d ov: %x, cnt: %x\n", n, overflow, count); return beats; } static void do_bw_sample(struct work_struct *work); static DECLARE_DEFERRED_WORK(bw_sample, do_bw_sample); static struct workqueue_struct *bw_sample_wq; static DEFINE_MUTEX(bw_lock); static ktime_t prev_ts; static u32 prev_r_start_val; static u32 prev_w_start_val; static struct msm_bus_paths bw_levels[] = { BW(0), BW(200), }; static struct msm_bus_scale_pdata bw_data = { .usecase = bw_levels, .num_usecases = ARRAY_SIZE(bw_levels), .name = "cpubw-krait", .active_only = 1, }; static u32 bus_client; static void compute_bw(int mbps); static irqreturn_t mon_intr_handler(int irq, void *dev_id); #define START_LIMIT 100 /* MBps */ static int start_monitoring(void) { int mb_limit; int ret; bw_sample_wq = alloc_workqueue("cpubw-krait", WQ_HIGHPRI, 0); if (!bw_sample_wq) { pr_err("Unable to alloc workqueue\n"); return -ENOMEM; } ret = request_threaded_irq(MON_INT, NULL, mon_intr_handler, IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_RISING, "cpubw_krait", mon_intr_handler); if (ret) { pr_err("Unable to register interrupt handler\n"); return ret; } bus_client = msm_bus_scale_register_client(&bw_data); if (!bus_client) { pr_err("Unable to register bus client\n"); ret = -ENODEV; goto bus_reg_fail; } compute_bw(START_LIMIT); mon_init(); mon_disable(0); mon_disable(1); mb_limit = mult_frac(START_LIMIT, sample_ms, MSEC_PER_SEC); mb_limit /= 2; prev_r_start_val = mon_set_limit_mbyte(0, mb_limit); prev_w_start_val = mon_set_limit_mbyte(1, mb_limit); prev_ts = ktime_get(); 
set_l2_indirect_reg(L2PMINTENSET, BIT(0)); set_l2_indirect_reg(L2PMINTENSET, BIT(1)); mon_enable(0); mon_enable(1); global_mon_enable(true); queue_delayed_work(bw_sample_wq, &bw_sample, msecs_to_jiffies(sample_ms)); return 0; bus_reg_fail: destroy_workqueue(bw_sample_wq); disable_irq(MON_INT); free_irq(MON_INT, mon_intr_handler); return ret; }
/*
 * One-time programming of the L2 PMU event counters RD_MON and WR_MON.
 * mon_enable()/global_mon_enable() are expected to be called separately
 * to actually start counting.
 */
static void mon_init(void)
{
	/* Event selection register; NOTE(review): 0x8B0B0000 encoding is
	 * hardware-specific — confirm against the Krait L2 PMU spec. */
	set_l2_indirect_reg(L2PMRESR2, 0x8B0B0000);
	/* Reset the per-counter control registers. */
	set_l2_indirect_reg(L2PMnEVCNTCR(RD_MON), 0x0);
	set_l2_indirect_reg(L2PMnEVCNTCR(WR_MON), 0x0);
	/* Preload counters at the wrap point. */
	set_l2_indirect_reg(L2PMnEVCNTR(RD_MON), 0xFFFFFFFF);
	set_l2_indirect_reg(L2PMnEVCNTR(WR_MON), 0xFFFFFFFF);
	/* Event filters; same 0xF003F mask for both monitors. */
	set_l2_indirect_reg(L2PMnEVFILTER(RD_MON), 0xF003F);
	set_l2_indirect_reg(L2PMnEVFILTER(WR_MON), 0xF003F);
	/* Event types: 0xA for the read monitor, 0xB for the write monitor. */
	set_l2_indirect_reg(L2PMnEVTYPER(RD_MON), 0xA);
	set_l2_indirect_reg(L2PMnEVTYPER(WR_MON), 0xB);
}
/*
 * One-time programming of L2 PMU event counters 0 and 1.  Counting is
 * started separately via mon_enable()/global_mon_enable().
 */
static void mon_init(void)
{
	/* Set up counters 0/1 to count write/read beats */
	/* NOTE(review): 0x8B0B0000 event-selection encoding is hardware
	 * specific — confirm against the Krait L2 PMU documentation. */
	set_l2_indirect_reg(L2PMRESR2, 0x8B0B0000);
	/* Reset per-counter control registers. */
	set_l2_indirect_reg(L2PMnEVCNTCR(0), 0x0);
	set_l2_indirect_reg(L2PMnEVCNTCR(1), 0x0);
	/* Preload both counters at the 32-bit wrap point. */
	set_l2_indirect_reg(L2PMnEVCNTR(0), 0xFFFFFFFF);
	set_l2_indirect_reg(L2PMnEVCNTR(1), 0xFFFFFFFF);
	/* Identical event filter for both counters. */
	set_l2_indirect_reg(L2PMnEVFILTER(0), 0xF003F);
	set_l2_indirect_reg(L2PMnEVFILTER(1), 0xF003F);
	/* Event types: 0xA on counter 0, 0xB on counter 1. */
	set_l2_indirect_reg(L2PMnEVTYPER(0), 0xA);
	set_l2_indirect_reg(L2PMnEVTYPER(1), 0xB);
}
/*
 * Read the L2 error syndrome register, optionally report it, and write
 * the value back to acknowledge the error.
 *
 * NOTE(review): the write-back of the read value assumes the register
 * has write-to-clear semantics — confirm against the L2ESR spec.
 */
void clear_l2cache_err(void)
{
	unsigned int val;

	val = get_l2_indirect_reg(L2ESR_IND_ADDR);
#ifdef CONFIG_IPQ_REPORT_L2ERR
	report_l2err(val);
#endif
	set_l2_indirect_reg(L2ESR_IND_ADDR, val);
}
/*
 * Arm event counter @n so it overflows after roughly @mbytes worth of
 * beats have been counted.  Returns the value the counter was started
 * from, so the caller can later work out how many beats elapsed.
 */
static u32 mon_set_limit_mbyte(int n, unsigned int mbytes)
{
	u32 start;

	/* Start this many beats below the 32-bit wrap point. */
	start = 0xFFFFFFFF - mult_frac(mbytes, SZ_1M, bytes_per_beat);
	set_l2_indirect_reg(L2PMnEVCNTR(n), start);
	pr_debug("EV%d MB: %d, start val: %x\n", n, mbytes, start);

	return start;
}
/* Set or clear the global enable bit (bit 0) of the L2 PMU control reg. */
static void global_mon_enable(bool en)
{
	u32 pmcr = get_l2_indirect_reg(L2PMCR);

	pmcr = (pmcr & ~BIT(0)) | (en ? BIT(0) : 0);
	set_l2_indirect_reg(L2PMCR, pmcr);
}
/*
 * Switch the primary clock mux of @sc by rewriting the low two select
 * bits of its CP15 mux register, then wait for the switch to settle.
 */
static void __set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);

	/* Replace only bits [1:0] with the new source select. */
	regval = (regval & ~0x3) | (pri_src_sel & 0x3);
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
/*
 * Switch the primary clock mux of @sc and wait for the switch to take
 * effect.  For non-L2 scalables the same select value is also written
 * to bits [9:8].
 *
 * NOTE(review): bits [9:8] appear to mirror the select in a second
 * field (possibly a wake/sleep shadow) — confirm against the Krait
 * CP15 CPMR documentation.  pri_src_sel is not masked here; callers
 * are assumed to pass values that fit in two bits.
 */
static void __set_pri_clk_src(struct scalable *sc, u32 pri_src_sel)
{
	u32 regval;

	regval = get_l2_indirect_reg(sc->l2cpmr_iaddr);
	/* Clear and rewrite the low two mux-select bits. */
	regval &= ~0x3;
	regval |= pri_src_sel;
	if (sc != &drv.scalable[L2]) {
		/* Mirror the select into bits [9:8] for CPU scalables. */
		regval &= ~(0x3 << 8);
		regval |= pri_src_sel << 8;
	}
	set_l2_indirect_reg(sc->l2cpmr_iaddr, regval);

	/* Wait for switch to complete. */
	mb();
	udelay(1);
}
/* Disable event counter @n via the count-enable-clear register. */
static void mon_disable(int n)
{
	set_l2_indirect_reg(L2PMCNTENCLR, BIT(n));
}
static void mon_disable(int n) { /* Disable event counter n */ set_l2_indirect_reg(L2PMCNTENCLR, BIT(n)); }