static irqreturn_t mon_intr_handler(int irq, void *dev) { struct devfreq *df = dev; ktime_t ts; unsigned int us; u32 regval; int ret; regval = get_l2_indirect_reg(L2PMOVSR); pr_debug("Got interrupt: %x\n", regval); devfreq_monitor_stop(df); ts = ktime_get(); us = ktime_to_us(ktime_sub(ts, prev_ts)); if (us > TOO_SOON_US) { mutex_lock(&df->lock); ret = update_devfreq(df); if (ret) pr_err("Unable to update freq on IRQ!\n"); mutex_unlock(&df->lock); } devfreq_monitor_start(df); return IRQ_HANDLED; }
/*
 * devfreq governor event handler for the throughput governor.
 *
 * Maps each governor lifecycle event onto the matching devfreq monitor
 * helper; unknown events are ignored. Always returns 0.
 */
static int devfreq_throughput_handler(struct devfreq *devfreq,
				      unsigned int event, void *data)
{
	if (event == DEVFREQ_GOV_START)
		devfreq_monitor_start(devfreq);
	else if (event == DEVFREQ_GOV_STOP)
		devfreq_monitor_stop(devfreq);
	else if (event == DEVFREQ_GOV_INTERVAL)
		devfreq_interval_update(devfreq, (unsigned int *)data);
	else if (event == DEVFREQ_GOV_SUSPEND)
		devfreq_monitor_suspend(devfreq);
	else if (event == DEVFREQ_GOV_RESUME)
		devfreq_monitor_resume(devfreq);

	return 0;
}
/*
 * Start (init == true) or resume (init == false) the bandwidth HW
 * monitor for @df and then kick off the corresponding devfreq polling
 * monitor. On a fresh start, the monitor is seeded with an initial
 * MBps estimate derived from the current frequency and io_percent.
 *
 * Returns 0 on success or the HW monitor's negative error code.
 */
static int start_monitor(struct devfreq *df, bool init)
{
	struct hwmon_node *node = df->data;
	struct bw_hwmon *hw = node->hw;
	int err;

	node->prev_ts = ktime_get();

	if (!init) {
		err = hw->resume_hwmon(hw);
	} else {
		unsigned long start_mbps;

		/* Fresh start: clear any stale vote/resume state. */
		node->prev_ab = 0;
		node->resume_freq = 0;
		node->resume_ab = 0;
		start_mbps = (df->previous_freq * node->io_percent) / 100;
		err = hw->start_hwmon(hw, start_mbps);
	}

	if (err) {
		dev_err(df->dev.parent,
			"Unable to start HW monitor! (%d)\n", err);
		return err;
	}

	if (init)
		devfreq_monitor_start(df);
	else
		devfreq_monitor_resume(df);

	node->mon_started = true;

	return 0;
}
/*
 * Handle an asynchronous update request from a bandwidth HW monitor.
 *
 * Validates that the monitor is wired to a running devfreq instance,
 * then briefly stops the polling monitor while re-evaluating the
 * frequency under df->lock.
 *
 * Returns 0 once the request has been processed (an update_devfreq()
 * failure is only logged), or a negative errno if the monitor is not
 * in a usable state.
 */
int update_bw_hwmon(struct bw_hwmon *hwmon)
{
	struct hwmon_node *node;
	struct devfreq *df;
	int err;

	if (!hwmon)
		return -EINVAL;

	df = hwmon->df;
	if (!df)
		return -ENODEV;

	node = df->data;
	if (!node)
		return -ENODEV;

	if (!node->mon_started)
		return -EBUSY;

	dev_dbg(df->dev.parent, "Got update request\n");

	devfreq_monitor_stop(df);

	mutex_lock(&df->lock);
	err = update_devfreq(df);
	mutex_unlock(&df->lock);
	if (err)
		dev_err(df->dev.parent,
			"Unable to update freq on request!\n");

	devfreq_monitor_start(df);

	return 0;
}
static int devfreq_cpubw_hwmon_ev_handler(struct devfreq *df, unsigned int event, void *data) { int ret; switch (event) { case DEVFREQ_GOV_START: ret = start_monitoring(df); if (ret) return ret; ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group); if (ret) return ret; devfreq_monitor_start(df); pr_debug("Enabled CPU BW HW monitor governor\n"); break; case DEVFREQ_GOV_STOP: sysfs_remove_group(&df->dev.kobj, &dev_attr_group); devfreq_monitor_stop(df); *(unsigned long *)df->data = 0; stop_monitoring(df); pr_debug("Disabled CPU BW HW monitor governor\n"); break; case DEVFREQ_GOV_INTERVAL: devfreq_interval_update(df, (unsigned int *)data); break; } return 0; }
static int devfreq_simple_usage_handler(struct devfreq *devfreq, unsigned int event, void *data) { int ret; switch (event) { case DEVFREQ_GOV_START: ret = devfreq_simple_usage_register_notifier(devfreq); if (ret) return ret; devfreq_monitor_start(devfreq); break; case DEVFREQ_GOV_STOP: devfreq_monitor_stop(devfreq); ret = devfreq_simple_usage_unregister_notifier(devfreq); if (ret) return ret; break; case DEVFREQ_GOV_INTERVAL: devfreq_interval_update(devfreq, (unsigned int*)data); break; case DEVFREQ_GOV_SUSPEND: devfreq_monitor_suspend(devfreq); break; case DEVFREQ_GOV_RESUME: devfreq_monitor_resume(devfreq); break; default: break; } return 0; }
/*
 * Overflow interrupt handler for the L2 performance monitor
 * (L2PMOVSR holds the per-counter overflow status bits).
 *
 * Stops the polling monitor, optionally re-evaluates the devfreq
 * frequency (under df->lock), then restarts the monitor.
 * NOTE(review): takes a mutex, so this presumably runs in threaded-IRQ
 * context — confirm against the request_irq() call site.
 */
static irqreturn_t mon_intr_handler(int irq, void *dev)
{
	struct devfreq *df = dev;
	ktime_t ts;
	unsigned int us;
	u32 regval;
	int ret;

	/* Read (and report) which counters overflowed. */
	regval = get_l2_indirect_reg(L2PMOVSR);
	pr_debug("Got interrupt: %x\n", regval);

	devfreq_monitor_stop(df);

	/*
	 * Don't recalc bandwidth if the interrupt comes right after a
	 * previous bandwidth calculation. This is done for two reasons:
	 *
	 * 1. Sampling the BW during a very short duration can result in a
	 *    very inaccurate measurement due to very short bursts.
	 * 2. This can only happen if the limit was hit very close to the end
	 *    of the previous sample period. Which means the current BW
	 *    estimate is not very off and doesn't need to be readjusted.
	 */
	ts = ktime_get();
	us = ktime_to_us(ktime_sub(ts, prev_ts));
	if (us > TOO_SOON_US) {
		mutex_lock(&df->lock);
		ret = update_devfreq(df);
		if (ret)
			pr_err("Unable to update freq on IRQ!\n");
		mutex_unlock(&df->lock);
	}

	devfreq_monitor_start(df);

	return IRQ_HANDLED;
}