/* Returns MBps of read/writes for the sampling window. */ static int mon_get_mbps(int n, u32 start_val, unsigned int us) { u32 overflow, count; long long beats; count = get_l2_indirect_reg(L2PMnEVCNTR(n)); overflow = get_l2_indirect_reg(L2PMOVSR); if (overflow & BIT(n)) beats = 0xFFFFFFFF - start_val + count; else beats = count - start_val; beats *= USEC_PER_SEC; beats *= bytes_per_beat; do_div(beats, us); beats = DIV_ROUND_UP_ULL(beats, MBYTE); pr_debug("EV%d ov: %x, cnt: %x\n", n, overflow, count); return beats; } static void do_bw_sample(struct work_struct *work); static DECLARE_DEFERRED_WORK(bw_sample, do_bw_sample); static struct workqueue_struct *bw_sample_wq; static DEFINE_MUTEX(bw_lock); static ktime_t prev_ts; static u32 prev_r_start_val; static u32 prev_w_start_val; static struct msm_bus_paths bw_levels[] = { BW(0), BW(200), }; static struct msm_bus_scale_pdata bw_data = { .usecase = bw_levels, .num_usecases = ARRAY_SIZE(bw_levels), .name = "cpubw-krait", .active_only = 1, }; static u32 bus_client; static void compute_bw(int mbps); static irqreturn_t mon_intr_handler(int irq, void *dev_id); #define START_LIMIT 100 /* MBps */ static int start_monitoring(void) { int mb_limit; int ret; bw_sample_wq = alloc_workqueue("cpubw-krait", WQ_HIGHPRI, 0); if (!bw_sample_wq) { pr_err("Unable to alloc workqueue\n"); return -ENOMEM; } ret = request_threaded_irq(MON_INT, NULL, mon_intr_handler, IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_RISING, "cpubw_krait", mon_intr_handler); if (ret) { pr_err("Unable to register interrupt handler\n"); return ret; } bus_client = msm_bus_scale_register_client(&bw_data); if (!bus_client) { pr_err("Unable to register bus client\n"); ret = -ENODEV; goto bus_reg_fail; } compute_bw(START_LIMIT); mon_init(); mon_disable(0); mon_disable(1); mb_limit = mult_frac(START_LIMIT, sample_ms, MSEC_PER_SEC); mb_limit /= 2; prev_r_start_val = mon_set_limit_mbyte(0, mb_limit); prev_w_start_val = mon_set_limit_mbyte(1, mb_limit); prev_ts = ktime_get(); 
set_l2_indirect_reg(L2PMINTENSET, BIT(0)); set_l2_indirect_reg(L2PMINTENSET, BIT(1)); mon_enable(0); mon_enable(1); global_mon_enable(true); queue_delayed_work(bw_sample_wq, &bw_sample, msecs_to_jiffies(sample_ms)); return 0; bus_reg_fail: destroy_workqueue(bw_sample_wq); disable_irq(MON_INT); free_irq(MON_INT, mon_intr_handler); return ret; }
/*
 * devfreq get_dev_status-style callback: sample the measured bandwidth,
 * let compute_bw() turn it into a target frequency/AB vote, and remember
 * the AB value it wrote into df->data.
 *
 * NOTE(review): a second definition of this function appears later in
 * this chunk — looks like merged file versions; confirm which one the
 * build actually uses.
 */
static int devfreq_cpubw_hwmon_get_freq(struct devfreq *df,
					unsigned long *freq, u32 *flag)
{
	unsigned long measured_mbps = measure_bw_and_set_irq();

	compute_bw(measured_mbps, freq, df->data);
	prev_ab = *(unsigned long *)df->data;

	return 0;
}
/*
 * devfreq get_dev_status-style callback: sample the measured bandwidth,
 * compute the target frequency, then floor it at the cpufreq-derived
 * bandwidth requirement.
 */
static int devfreq_cpubw_hwmon_get_freq(struct devfreq *df,
					unsigned long *freq, u32 *flag)
{
	unsigned long mbps;
	unsigned long cpufreq_mbps;

	mbps = measure_bw_and_set_irq();
	compute_bw(mbps, freq, df->data);

	/*
	 * Fix: read msm_cpufreq_get_bw() once so the comparison and the
	 * assignment use the same value even if it changes concurrently
	 * (the original called it twice).
	 */
	cpufreq_mbps = msm_cpufreq_get_bw();
	if (*freq < cpufreq_mbps)
		*freq = cpufreq_mbps;

	return 0;
}
/*
 * Sample both L2 PM counters over the elapsed window, convert the deltas
 * to MBps and feed the total into compute_bw(). Holds bw_lock so the
 * prev_* sampling state and counter programming stay consistent.
 */
static void measure_bw(void)
{
	int r_mbps, w_mbps, mbps;
	ktime_t ts;
	unsigned int us;

	mutex_lock(&bw_lock);

	/*
	 * Since we are stopping the counters, we don't want this short work
	 * to be interrupted by other tasks and cause the measurements to be
	 * wrong. Not blocking interrupts to avoid affecting interrupt
	 * latency and since they should be short anyway because they run in
	 * atomic context.
	 */
	preempt_disable();

	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, prev_ts));
	if (!us)
		us = 1; /* avoid divide-by-zero inside mon_get_mbps() */

	/* Freeze both counters while we read and re-arm them. */
	mon_disable(0);
	mon_disable(1);

	/* Counter 0 = reads, counter 1 = writes (per the r_/w_ naming). */
	r_mbps = mon_get_mbps(0, prev_r_start_val, us);
	w_mbps = mon_get_mbps(1, prev_w_start_val, us);

	/*
	 * Re-arm each counter's overflow limit based on the rate just
	 * measured; the returned start values anchor the next sample.
	 */
	prev_r_start_val = mon_set_limit_mbyte(0, to_limit(r_mbps));
	prev_w_start_val = mon_set_limit_mbyte(1, to_limit(w_mbps));

	mon_enable(0);
	mon_enable(1);

	preempt_enable();

	mbps = r_mbps + w_mbps;
	pr_debug("R/W/BW/us = %d/%d/%d/%d\n", r_mbps, w_mbps, mbps, us);

	compute_bw(mbps);

	prev_ts = ts;
	mutex_unlock(&bw_lock);
}