/*
 * bch_time_stats_update() - fold one operation's timing into running stats.
 * @stats:      the accumulator; protected by its own spinlock
 * @start_time: local_clock() timestamp taken when the operation began
 *
 * Updates the max duration, an EWMA of per-operation duration, and an EWMA
 * of the interval between calls.  Negative deltas (clock appearing to run
 * backwards) are clamped to 0.  EWMA values are stored scaled by << 8
 * (fixed point), matching the ewma_add() weight/factor of 8.
 */
void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
{
	uint64_t now, duration, last;

	spin_lock(&stats->lock);

	now = local_clock();
	/* Clamp to 0 if the clock went backwards relative to start_time */
	duration = time_after64(now, start_time) ? now - start_time : 0;
	/* Interval since the previous call, clamped the same way */
	last = time_after64(now, stats->last) ? now - stats->last : 0;

	stats->max_duration = max(stats->max_duration, duration);

	if (stats->last) {
		/* Subsequent calls: fold samples into the running EWMAs */
		ewma_add(stats->average_duration, duration, 8, 8);

		if (stats->average_frequency)
			ewma_add(stats->average_frequency, last, 8, 8);
		else
			/* First interval sample: seed the EWMA directly */
			stats->average_frequency = last << 8;
	} else {
		/* First call ever: seed the duration EWMA */
		stats->average_duration = duration << 8;
	}

	/* stats->last == 0 means "never updated", so never store 0 */
	stats->last = now ?: 1;

	spin_unlock(&stats->lock);
}
static void my_apm_get_power_status(struct apm_power_info *info) { if (!info) return; if (time_after64(jiffies_64, last_update + speriod * HZ)) { if (!update_params()) { schedule_delayed_work(&battery_work, HZ); } } info->ac_line_status = onAC ? APM_AC_ONLINE : APM_AC_OFFLINE; if (batteryCharging) { info->battery_status = APM_BATTERY_STATUS_CHARGING; } else { if (batteryPresent) { info->battery_status = APM_BATTERY_STATUS_HIGH; } else { info->battery_status = APM_BATTERY_STATUS_NOT_PRESENT; } } info->battery_life = rsoc; info->time = time; info->units = APM_UNITS_MINS; info->battery_flag = (1 << info->battery_status); }
/**
 * bch_next_delay() - update ratelimiting statistics and calculate next delay
 * @d: the struct bch_ratelimit to update
 * @done: the amount of work done, in arbitrary units
 *
 * Increment @d by the amount of work done, and return how long to delay in
 * jiffies until the next time to do some work.
 */
uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
	uint64_t now = local_clock();

	/* Advance the "next work due" time by @done units at the set rate */
	d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate));

	/*
	 * Bound d->next relative to now.  Don't let us fall further than 2
	 * seconds behind (prevents an unrecoverable backlog), and don't let
	 * us get more than 2.5 seconds ahead (so we still notice promptly
	 * if the control system tells us to speed up).
	 */
	if (time_before64(now + NSEC_PER_SEC * 5LLU / 2LLU, d->next))
		d->next = now + NSEC_PER_SEC * 5LLU / 2LLU;

	if (time_after64(now - NSEC_PER_SEC * 2, d->next))
		d->next = now - NSEC_PER_SEC * 2;

	/* Convert the remaining nanoseconds to jiffies; 0 if already due */
	if (time_after64(d->next, now))
		return div_u64(d->next - now, NSEC_PER_SEC / HZ);

	return 0;
}
static int get_property(struct power_supply *b, enum power_supply_property psp, union power_supply_propval *val) { if (time_after64(jiffies_64, last_update + speriod * HZ)) { int retry = 10; while (!update_params()) { msleep(200); if (!--retry) { printk(KERN_ERR "ghi270_power: " "Battery comm fail\n"); break; } } } switch (psp) { case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: val->intval = 100; break; case POWER_SUPPLY_PROP_CURRENT_NOW: val->intval = current_now; break; case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN: val->intval = 0; break; case POWER_SUPPLY_PROP_CHARGE_NOW: val->intval = rsoc; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = voltage; break; case POWER_SUPPLY_PROP_STATUS: if (batteryCharging) { val->intval = POWER_SUPPLY_STATUS_CHARGING; } else { val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; } break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: val->intval = time; break; case POWER_SUPPLY_PROP_TEMP: val->intval = temp; break; default: return -EINVAL; }; return 0; }
/*
 * __update_writeback_rate() - PD (proportional-derivative) controller that
 * retunes the writeback rate for one cached device.
 * @dc: the cached device whose rate is being updated
 *
 * The target dirty-sector count is this device's share (by size) of
 * writeback_percent of the cache.  The error (dirty - target) drives the
 * proportional term; the change in dirty sectors since the last update
 * drives the derivative term.  The resulting rate is clamped to
 * [1, NSEC_PER_MSEC] and the intermediate terms are stored for sysfs
 * inspection.
 */
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	/* How many dirty sectors the whole cache should hold */
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);
	/* This device's proportional share of that target */
	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */
	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	/* Smooth the derivative with an EWMA before applying the d term */
	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	/* Expose the controller internals (read via sysfs) */
	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}
/*
 * power_debounce_timer_func() - power-button debounce/hold state machine.
 * @junk: unused timer argument
 *
 * Runs with local IRQs disabled.  Walks isr_state:
 *   PB_DEBOUNCE: if the key is still held after the debounce interval,
 *     re-arm the IRQ for release and re-arm the timer for the
 *     press-and-hold timeout; otherwise go back to idle.
 *   PB_DOWN: if the key is still held at the hold timeout, emit a
 *     KEY_POWER press/release (rate-limited to one per 1.5s) and move to
 *     PB_DOWN_POWER_OFF; otherwise abort back to idle.
 */
static void power_debounce_timer_func(unsigned long junk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch(isr_state) {
	case PB_IDLE:
		/* Timer should never fire in idle state */
		printk("Unexpected PB_IDLE\n");
		break;
	case PB_DEBOUNCE:
		if(!pxa_gpio_get_value(GHI270_GPIO0_KEY_nON)) {
			/* Power is still pressed */
			set_irq_type(gpio_to_irq(GHI270_GPIO0_KEY_nON),
				     IRQT_RISING); /* Look for release */
			isr_state = PB_DOWN;
			/* Re-arm for the press-and-hold timeout (pbto in
			 * tenths of a second, plus 2 jiffies of slack) */
			mod_timer(&power_debounce_timer,
				  jiffies + (pbto * HZ) / 10 + 2);
		} else {
			isr_state = PB_IDLE;
			set_irq_type(gpio_to_irq(GHI270_GPIO0_KEY_nON),
				     IRQT_FALLING);
		}
		enable_irq(gpio_to_irq(GHI270_GPIO0_KEY_nON));
		break;
	case PB_DOWN:
		if(!pxa_gpio_get_value(GHI270_GPIO0_KEY_nON)) {
			/* Still down, so let's suspend. */
			if(time_after64(jiffies_64,
					off_jiffies64 + msecs_to_jiffies(1500))) {
				/* Prohibit a bunch of power key events to be queued. */
				off_jiffies64 = jiffies_64;
				input_report_key(keydev, KEY_POWER, 1);
				input_report_key(keydev, KEY_POWER, 0);
				input_sync(keydev);
			}
			PWM_PWDUTY0 = 1023; /* Turn off the LCD light for feedback */
			isr_state = PB_DOWN_POWER_OFF;
		} else {
			/* It's no longer down, so be safe and don't do power off. */
			isr_state = PB_IDLE;
		}
		break;
	case PB_DOWN_POWER_OFF:
		/* Timer should have been idle after power-off was queued */
		printk("State error in pbto.\n");
		isr_state = PB_IDLE;
		break;
	}
	local_irq_restore(flags);
}
/*
 * mdm_notify() - handle ESOC framework notifications for the MDM modem.
 * @notify: which event occurred
 * @esoc:   the ESOC link; its private data is the mdm_ctrl instance
 *
 * Dispatches on the notification type: image-transfer outcomes feed back
 * into the ESOC state machine, debug completion signals the debug_done
 * completion, and the primary-crash/reboot paths drive the modem's
 * ERRFATAL/SOFT_RESET GPIOs (crash handling polls MDM2AP_STATUS for up to
 * MDM_MODEM_TIMEOUT ms before forcing a hard reset pulse).
 */
static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
{
	bool status_down;
	uint64_t timeout;
	uint64_t now;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;

	switch (notify) {
	case ESOC_IMG_XFER_DONE:
		dev_info(dev, "%s ESOC_IMG_XFER_DONE\n", __func__);
		/* Modem hasn't reported up yet; check again after a delay */
		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case ESOC_BOOT_DONE:
		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
		break;
	case ESOC_IMG_XFER_RETRY:
		mdm->init = 1;
		mdm_toggle_soft_reset(mdm);
		break;
	case ESOC_IMG_XFER_FAIL:
		esoc_clink_evt_notify(ESOC_BOOT_FAIL, esoc);
		break;
	case ESOC_UPGRADE_AVAILABLE:
		break;
	case ESOC_DEBUG_DONE:
		mdm->debug_fail = false;
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		complete(&mdm->debug_done);
		break;
	case ESOC_DEBUG_FAIL:
		mdm->debug_fail = true;
		complete(&mdm->debug_done);
		break;
	case ESOC_PRIMARY_CRASH:
		mdm_disable_irqs(mdm);
		status_down = false;
		dev_info(dev, "signal apq err fatal for graceful restart\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		/* Deadline in milliseconds, derived from local_clock() */
		timeout = local_clock();
		do_div(timeout, NSEC_PER_MSEC);
		timeout += MDM_MODEM_TIMEOUT;
		/* Busy-poll MDM2AP_STATUS until it drops or we time out */
		do {
			if (gpio_get_value(MDM_GPIO(mdm,
						MDM2AP_STATUS)) == 0) {
				status_down = true;
				break;
			}
			now = local_clock();
			do_div(now, NSEC_PER_MSEC);
		} while (!time_after64(now, timeout));

		if (!status_down) {
			/* Modem did not shut down gracefully; hard reset */
			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
				__func__);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !!mdm->soft_reset_inverted);
			/*
			 * allow PS hold assert to be detected.
			 * pmic requires 6ms for crash reset case.
			 */
			mdelay(6);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !mdm->soft_reset_inverted);
		}
		break;
	case ESOC_PRIMARY_REBOOT:
		dev_info(mdm->dev, "Triggering mdm cold reset");
		mdm->ready = 0;
		/* 300ms reset pulse on AP2MDM_SOFT_RESET (polarity depends
		 * on soft_reset_inverted) */
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				      !!mdm->soft_reset_inverted);
		mdelay(300);
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				      !mdm->soft_reset_inverted);
		break;
	};
	return;
}
/*
 * mdm_notify() - handle ESOC framework notifications for the MDM modem.
 * @notify: which event occurred
 * @esoc:   the ESOC link; its private data is the mdm_ctrl instance
 *
 * Variant with VDDMIN signalling and extra cases: boot/transfer failures
 * map to ESOC_INVALID_STATE, primary reboot waits (up to ~2s) for
 * MDM2AP_STATUS to drop before pulsing SOFT_RESET, and diag-disable /
 * forced-crash / silent-reset requests are supported.
 */
static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
{
	bool status_down;
	uint64_t timeout;
	uint64_t now;
	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
	struct device *dev = mdm->dev;
	int ret;
	int max_spin = 20;	/* 20 * 100ms polls before giving up */

	switch (notify) {
	case ESOC_IMG_XFER_DONE:
		dev_info(dev, "%s ESOC_IMG_XFER_DONE\n", __func__);
		/* Modem hasn't reported up yet; check again after a delay */
		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
		break;
	case ESOC_BOOT_DONE:
		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
		break;
	case ESOC_IMG_XFER_RETRY:
		mdm->init = 1;
		mdm_toggle_soft_reset(mdm);
		break;
	case ESOC_IMG_XFER_FAIL:
		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
		break;
	case ESOC_BOOT_FAIL:
		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
		break;
	case ESOC_UPGRADE_AVAILABLE:
		break;
	case ESOC_DEBUG_DONE:
		mdm->debug_fail = false;
		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
		complete(&mdm->debug_done);
		break;
	case ESOC_DEBUG_FAIL:
		mdm->debug_fail = true;
		complete(&mdm->debug_done);
		break;
	case ESOC_PRIMARY_CRASH:
		mdm_disable_irqs(mdm);
		status_down = false;
		dev_info(dev, "signal apq err fatal for graceful restart\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_VDDMIN), 1);
		/* Deadline in milliseconds, derived from local_clock() */
		timeout = local_clock();
		do_div(timeout, NSEC_PER_MSEC);
		timeout += MDM_MODEM_TIMEOUT;
		/* Busy-poll MDM2AP_STATUS until it drops or we time out */
		do {
			if (gpio_get_value(MDM_GPIO(mdm,
						MDM2AP_STATUS)) == 0) {
				status_down = true;
				break;
			}
			now = local_clock();
			do_div(now, NSEC_PER_MSEC);
		} while (!time_after64(now, timeout));

		if (!status_down) {
			/* Modem did not shut down gracefully; hard reset */
			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
				__func__);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !!mdm->soft_reset_inverted);
			/*
			 * allow PS hold assert to be detected.
			 * pmic requires 6ms for crash reset case.
			 */
			mdelay(6);
			gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
					      !mdm->soft_reset_inverted);
		}
		break;
	case ESOC_PRIMARY_REBOOT:
		exynos_pcie_disable_irq(0);
		mdm_disable_irqs(mdm);
		dev_info(mdm->dev, "Triggering mdm cold reset");
		mdm->ready = 0;
		/* Give the modem up to ~2s to drop MDM2AP_STATUS on its own */
		while (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) &&
		       max_spin--) {
			msleep(100);
			dev_info(mdm->dev,
				 "gpio_get_value(MDM2AP_STATUS) : %d\n",
				 gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)));
		}
		/* 300ms reset pulse on AP2MDM_SOFT_RESET */
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				      !!mdm->soft_reset_inverted);
		mdelay(300);
		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
				      !mdm->soft_reset_inverted);
		break;
	case ESOC_DIAG_DISABLE:
		dev_info(mdm->dev, "Send diag_disable noti\n");
		ret = sysmon_send_diag_disable_noti(mdm->sysmon_subsys_id);
		if (ret < 0)
			dev_err(mdm->dev,
				"sending diag_disable noti is failed, ret = %d\n",
				ret);
		else
			dev_info(mdm->dev,
				 "sending diag_disable noti is succeed.\n");
		break;
	case ESOC_FORCE_CPCRASH:
		dev_err(mdm->dev, "Force CP Crash\n");
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_VDDMIN), 1);
		break;
	case ESOC_CP_SILENT_RESET:
		dev_err(mdm->dev, "Force CP Silent Reset\n");
		set_silent_reset();
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
		gpio_set_value(MDM_GPIO(mdm, AP2MDM_VDDMIN), 1);
		break;
	};
	return;
}
/*
 * rt5501_headset_detect() - handle headset plug/unplug for the RT5501 amp.
 * @on: nonzero for headset inserted, zero for removed.
 *
 * On insert: (re)starts impedance detection unless the previous removal was
 * less than 500ms ago (debounce), suspending the amp first if it was
 * playing.  On remove: cancels detection, suspends the amp, and, when the
 * amp was in high-impedance mode, resets the chip — temporarily powering
 * the speaker-enable GPIO and switching the S4 supply to PWM if needed.
 * Always returns 0.
 */
static int rt5501_headset_detect(int on)
{
	if(on) {
		pr_info("%s: headset in ++\n",__func__);
		mutex_lock(&rt5501_query.mlock);

		/* Start a new impedance query if one is already in flight
		 * or the last removal was more than 500ms ago */
		if(rt5501_query.headsetom == HEADSET_OM_UNDER_DETECT || \
		   time_after64(get_jiffies_64(),last_hp_remove +
				msecs_to_jiffies(500))) {
			rt5501_query.hs_qstatus = RT5501_QUERY_HEADSET;
			rt5501_query.headsetom = HEADSET_OM_UNDER_DETECT;
		} else {
			rt5501_query.hs_qstatus = RT5501_QUERY_FINISH;
		}

		mutex_unlock(&rt5501_query.mlock);
		cancel_delayed_work_sync(&rt5501_query.hs_imp_detec_work);
		mutex_lock(&rt5501_query.gpiolock);
		mutex_lock(&rt5501_query.mlock);

		if(rt5501_query.rt5501_status == RT5501_PLAYBACK) {
			/* Amp register writes differ for high-impedance mode
			 * (register meanings per RT5501 datasheet) */
			if(high_imp) {
				rt5501_write_reg(1,0x7);
				rt5501_write_reg(0xb1,0x81);
			} else {
				rt5501_write_reg(1,0xc7);
			}
			last_spkamp_state = 0;
			pr_info("%s: OFF\n", __func__);
			rt5501_query.rt5501_status = RT5501_SUSPEND;
		}
		pr_info("%s: headset in --\n",__func__);
		mutex_unlock(&rt5501_query.mlock);
		mutex_unlock(&rt5501_query.gpiolock);
		/* Kick off impedance detection shortly after insert */
		queue_delayed_work(hs_wq,&rt5501_query.hs_imp_detec_work,
				   msecs_to_jiffies(5));
		pr_info("%s: headset in --2\n",__func__);
	} else {
		pr_info("%s: headset remove ++\n",__func__);
		flush_work_sync(&rt5501_query.volume_ramp_work.work);
		mutex_lock(&rt5501_query.mlock);
		rt5501_query.hs_qstatus = RT5501_QUERY_OFF;
		mutex_unlock(&rt5501_query.mlock);
		cancel_delayed_work_sync(&rt5501_query.hs_imp_detec_work);
		mutex_lock(&rt5501_query.gpiolock);
		mutex_lock(&rt5501_query.mlock);

		if(rt5501_query.rt5501_status == RT5501_PLAYBACK) {
			if(high_imp) {
				rt5501_write_reg(1,0x7);
				rt5501_write_reg(0xb1,0x81);
			} else {
				rt5501_write_reg(1,0xc7);
			}
			last_spkamp_state = 0;
			pr_info("%s: OFF\n", __func__);
			rt5501_query.rt5501_status = RT5501_SUSPEND;
		}
		rt5501_query.curmode = RT5501_MODE_OFF;
		pr_info("%s: headset remove --1\n",__func__);

		if(high_imp) {
			int closegpio = 0;

			/* The chip reset below needs the enable GPIO up;
			 * power it (and move S4 to PWM) temporarily if off */
			if((rt5501_query.gpiostatus == AMP_GPIO_OFF) &&
			   pdata->gpio_rt5501_spk_en) {
				if(rt5501_query.s4status == AMP_S4_AUTO) {
					pm8921_aud_set_s4_pwm();
					rt5501_query.s4status = AMP_S4_PWM;
					msleep(1);
				}
				pr_info("%s: enable gpio %d\n",__func__,
					pdata->gpio_rt5501_spk_en);
				gpio_direction_output(pdata->gpio_rt5501_spk_en, 1);
				rt5501_query.gpiostatus = AMP_GPIO_ON;
				closegpio = 1;
				msleep(1);
			}
			pr_info("%s: reset rt5501\n",__func__);
			rt5501_write_reg(0x0,0x4);
			mdelay(1);
			rt5501_write_reg(0x1,0xc7);
			high_imp = 0;

			/* Restore GPIO/S4 state if we powered them above */
			if(closegpio && (rt5501_query.gpiostatus == AMP_GPIO_ON) &&
			   pdata->gpio_rt5501_spk_en) {
				pr_info("%s: disable gpio %d\n",__func__,
					pdata->gpio_rt5501_spk_en);
				gpio_direction_output(pdata->gpio_rt5501_spk_en, 0);
				rt5501_query.gpiostatus = AMP_GPIO_OFF;
				if(rt5501_query.s4status == AMP_S4_PWM) {
					pm8921_aud_set_s4_auto();
					rt5501_query.s4status = AMP_S4_AUTO;
				}
			}
		}
		/* Timestamp for the 500ms re-insert debounce above */
		last_hp_remove = get_jiffies_64();
		mutex_unlock(&rt5501_query.mlock);
		mutex_unlock(&rt5501_query.gpiolock);
		pr_info("%s: headset remove --2\n",__func__);
	}
	return 0;
}