static unsigned long meas_bw_and_set_irq(struct bw_hwmon *hw, unsigned int tol, unsigned int us) { unsigned long mbps; u32 limit; unsigned int sample_ms = hw->df->profile->polling_ms; struct bwmon *m = to_bwmon(hw); mon_disable(m); mbps = mon_get_count(m); mbps = bytes_to_mbps(mbps, us); /* * If the counter wraps on thres, don't set the thres too low. * Setting it too low runs the risk of the counter wrapping around * multiple times before the IRQ is processed. */ if (likely(!m->spec->wrap_on_thres)) limit = mbps_to_bytes(mbps, sample_ms, tol); else limit = mbps_to_bytes(max(mbps, 400UL), sample_ms, tol); mon_set_limit(m, limit); mon_clear(m); mon_irq_clear(m); mon_enable(m); dev_dbg(m->dev, "MBps = %lu\n", mbps); return mbps; }
static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps) { struct bwmon *m = to_bwmon(hw); u32 limit; int ret; ret = request_threaded_irq(m->irq, NULL, bwmon_intr_handler, IRQF_ONESHOT | IRQF_SHARED, dev_name(m->dev), m); if (ret) { dev_err(m->dev, "Unable to register interrupt handler! (%d)\n", ret); return ret; } mon_disable(m); limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0); mon_set_limit(m, limit); mon_clear(m); mon_irq_clear(m); mon_irq_enable(m); mon_enable(m); return 0; }
static unsigned long meas_bw_and_set_irq(struct bw_hwmon *hw, unsigned int tol, unsigned int us) { unsigned long mbps; u32 limit; unsigned int sample_ms = hw->df->profile->polling_ms; struct bwmon *m = to_bwmon(hw); mon_disable(m); mbps = mon_get_count(m); mbps = bytes_to_mbps(mbps, us); /* + 1024 is to workaround HW design issue. Needs further tuning. */ limit = mbps_to_bytes(mbps + 1024, sample_ms, tol); mon_set_limit(m, limit); mon_clear(m); mon_irq_clear(m); mon_enable(m); dev_dbg(m->dev, "MBps = %lu\n", mbps); return mbps; }
/*
 * Turn the latest bandwidth measurement into the governor's frequency and
 * AB votes, and reprogram the wake-up thresholds for the next window.
 *
 * Maintains, under irq_lock: a decaying historic-peak record, a low-power
 * IO-percent mode counter, and a hysteresis state machine that can hold
 * the vote at a recent peak. After dropping the lock, the request is
 * guard-banded and decayed against the previous AB vote.
 *
 * @node: per-device governor state (fields mutated under irq_lock)
 * @freq: out: requested frequency (new_bw scaled by io_percent)
 * @ab:   out (optional): requested AB, rounded up to bw_step; may be NULL
 *
 * Returns the requested bandwidth (req_mbps) before guard band and decay.
 */
static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
					unsigned long *freq, unsigned long *ab)
{
	unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
	unsigned long meas_mbps_zone;
	unsigned long hist_lo_tol, hyst_lo_tol;
	struct bw_hwmon *hw = node->hw;
	unsigned int new_bw, io_percent;
	ktime_t ts;
	unsigned int ms;

	spin_lock_irqsave(&irq_lock, flags);

	ts = ktime_get();
	ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
	/*
	 * Close out the sample window unless the IRQ path already ended it
	 * early (node->sampled) within the current sample period.
	 */
	if (!node->sampled || ms >= node->sample_ms)
		__bw_hwmon_sample_end(node->hw);
	node->sampled = false;

	/* Consume the window's peak; start the request from the measurement. */
	req_mbps = meas_mbps = node->max_mbps;
	node->max_mbps = 0;

	hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
	/* Remember historic peak in the past hist_mem decision windows. */
	if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
		/* If new max or no history */
		node->hist_max_mbps = meas_mbps;
		node->hist_mem = node->hist_memory;
	} else if (meas_mbps >= hist_lo_tol) {
		/*
		 * If subsequent peaks come close (within tolerance) to but
		 * less than the historic peak, then reset the history start,
		 * but not the peak value.
		 */
		node->hist_mem = node->hist_memory;
	} else {
		/* Count down history expiration. */
		if (node->hist_mem)
			node->hist_mem--;
	}

	/* Keep track of whether we are in low power mode consistently. */
	if (meas_mbps > node->low_power_ceil_mbps)
		node->above_low_power = node->low_power_delay;
	if (node->above_low_power)
		node->above_low_power--;

	/* Pick the IO percent for this window based on low-power state. */
	if (node->above_low_power)
		io_percent = node->io_percent;
	else
		io_percent = node->low_power_io_percent;

	/*
	 * The AB value that corresponds to the lowest mbps zone greater than
	 * or equal to the "frequency" the current measurement will pick.
	 * This upper limit is useful for balancing out any prediction
	 * mechanisms to be power friendly.
	 */
	meas_mbps_zone = (meas_mbps * 100) / io_percent;
	meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
	meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
	meas_mbps_zone = max(meas_mbps, meas_mbps_zone);

	/*
	 * If this is a wake up due to BW increase, vote much higher BW than
	 * what we measure to stay ahead of increasing traffic and then set
	 * it up to vote for measured BW if we see down_count short sample
	 * windows of low traffic.
	 */
	if (node->wake == UP_WAKE) {
		req_mbps += ((meas_mbps - node->prev_req)
				* node->up_scale) / 100;
		/*
		 * However if the measured load is less than the historic
		 * peak, but the over request is higher than the historic
		 * peak, then we could limit the over requesting to the
		 * historic peak.
		 */
		if (req_mbps > node->hist_max_mbps
				&& meas_mbps < node->hist_max_mbps)
			req_mbps = node->hist_max_mbps;

		/* Cap the over-request at the zone-rounded measurement. */
		req_mbps = min(req_mbps, meas_mbps_zone);
	}

	/* Hysteresis: track repeated peaks near the current hyst_mbps. */
	hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
	if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
		/* New, higher peak — restart the trigger window around it. */
		hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
		node->hyst_peak = 0;
		node->hyst_trig_win = node->hyst_length;
		node->hyst_mbps = meas_mbps;
	}

	/*
	 * Check node->max_mbps to avoid double counting peaks that cause
	 * early termination of a window.
	 */
	if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS &&
	    !node->max_mbps) {
		node->hyst_peak++;
		if (node->hyst_peak >= node->hyst_trigger_count ||
		    node->hyst_en)
			node->hyst_en = node->hyst_length;
	}

	/* Age both the trigger window and the hysteresis hold. */
	if (node->hyst_trig_win)
		node->hyst_trig_win--;
	if (node->hyst_en)
		node->hyst_en--;

	/* Both expired — forget the tracked peak entirely. */
	if (!node->hyst_trig_win && !node->hyst_en) {
		node->hyst_peak = 0;
		node->hyst_mbps = 0;
	}

	/* While hysteresis is active, hold the vote at the tracked peak. */
	if (node->hyst_en) {
		if (meas_mbps > node->idle_mbps)
			req_mbps = max(req_mbps, node->hyst_mbps);
	}

	/* Stretch the short sample window size, if the traffic is too low */
	if (meas_mbps < MIN_MBPS) {
		node->up_wake_mbps = (max(MIN_MBPS, req_mbps)
					* (100 + node->up_thres)) / 100;
		node->down_wake_mbps = 0;
		thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
					node->sample_ms);
	} else {
		/*
		 * Up wake vs down wake are intentionally a percentage of
		 * req_mbps vs meas_mbps to make sure the over requesting
		 * phase is handled properly. We only want to wake up and
		 * reduce the vote based on the measured mbps being less than
		 * the previous measurement that caused the "over request".
		 */
		node->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
		node->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
		thres = mbps_to_bytes(meas_mbps, node->sample_ms);
	}
	node->down_cnt = node->down_count;
	/* Program the HW threshold; remember the byte count it reports. */
	node->bytes = hw->set_thres(hw, thres);

	node->wake = 0;
	node->prev_req = req_mbps;

	spin_unlock_irqrestore(&irq_lock, flags);

	/* Add the configured guard band on top of the request. */
	adj_mbps = req_mbps + node->guard_band_mbps;

	/*
	 * Jump up immediately on increases; on decreases, decay toward the
	 * new value as a weighted blend with the previous AB vote.
	 */
	if (adj_mbps > node->prev_ab) {
		new_bw = adj_mbps;
	} else {
		new_bw = adj_mbps * node->decay_rate
			+ node->prev_ab * (100 - node->decay_rate);
		new_bw /= 100;
	}

	node->prev_ab = new_bw;
	if (ab)
		*ab = roundup(new_bw, node->bw_step);

	/* Scale AB up by the IO percent to get the frequency vote. */
	*freq = (new_bw * 100) / io_percent;
	return req_mbps;
}