/* Log an event related to DSIM (MIPI DSI master) into the DECON event ring. */
static inline void disp_ss_event_log_dsim
	(disp_ss_event_t type, struct v4l2_subdev *sd, ktime_t time)
{
	struct dsim_device *dsim = container_of(sd, struct dsim_device, sd);
	struct decon_device *decon = get_decon_drvdata(dsim->id);
	/*
	 * Take the modulo on an unsigned value: once the 32-bit atomic
	 * counter wraps past INT_MAX, a signed '%' would produce a negative
	 * index and access outside disp_ss_log[].
	 */
	int idx = (int)((unsigned int)atomic_inc_return(&decon->disp_ss_log_idx) %
			DISP_EVENT_LOG_MAX);
	struct disp_ss_log *log = &decon->disp_ss_log[idx];

	/* Use the caller-supplied timestamp when one was given. */
	if (time.tv64)
		log->time = time;
	else
		log->time = ktime_get();
	log->type = type;

	switch (type) {
	case DISP_EVT_DSIM_SUSPEND:
	case DISP_EVT_DSIM_RESUME:
	case DISP_EVT_ENTER_ULPS:
	case DISP_EVT_EXIT_ULPS:
	case DISP_EVT_FRAMEDONE:
		/* PM-related events also record the runtime-PM state and
		 * how long ago the event's timestamp was taken. */
		log->data.pm.pm_status = pm_runtime_active(dsim->dev);
		log->data.pm.elapsed = ktime_sub(ktime_get(), log->time);
		break;
	default:
		/* Any remaining types log just time and type. */
		break;
	}
}
/* Log an event related to VPP (video post processor) into the DECON event ring. */
static inline void disp_ss_event_log_vpp
	(disp_ss_event_t type, struct v4l2_subdev *sd, ktime_t time)
{
	struct decon_device *decon =
		get_decon_drvdata(__get_decon_id_for_vpp(sd));
	/*
	 * Take the modulo on an unsigned value: once the 32-bit atomic
	 * counter wraps past INT_MAX, a signed '%' would produce a negative
	 * index and access outside disp_ss_log[].
	 */
	int idx = (int)((unsigned int)atomic_inc_return(&decon->disp_ss_log_idx) %
			DISP_EVENT_LOG_MAX);
	struct disp_ss_log *log = &decon->disp_ss_log[idx];
	struct vpp_dev *vpp = v4l2_get_subdevdata(sd);

	/* Use the caller-supplied timestamp when one was given. */
	if (time.tv64)
		log->time = time;
	else
		log->time = ktime_get();
	log->type = type;

	switch (type) {
	case DISP_EVT_VPP_SUSPEND:
	case DISP_EVT_VPP_RESUME:
		/* PM events record runtime-PM state and elapsed time. */
		log->data.pm.pm_status = pm_runtime_active(&vpp->pdev->dev);
		log->data.pm.elapsed = ktime_sub(ktime_get(), log->time);
		break;
	case DISP_EVT_VPP_FRAMEDONE:
	case DISP_EVT_VPP_STOP:
	case DISP_EVT_VPP_WINCON:
		/* Frame events snapshot the VPP start/done counters. */
		log->data.vpp.id = vpp->id;
		log->data.vpp.start_cnt = vpp->start_count;
		log->data.vpp.done_cnt = vpp->done_count;
		break;
	default:
		log->data.vpp.id = vpp->id;
		break;
	}
	return;
}
static void podgov_set_freq_request(struct device *dev, int freq_request) { struct platform_device *d = to_platform_device(dev); struct nvhost_device_data *pdata = platform_get_drvdata(d); struct devfreq *df = pdata->power_manager; struct podgov_info_rec *podgov; if (!df) return; /* make sure the device is alive before doing any scaling */ nvhost_module_busy_noresume(d); mutex_lock(&df->lock); podgov = df->data; trace_podgov_set_freq_request(freq_request); podgov->p_freq_request = freq_request; /* update the request only if podgov is enabled, device is turned on * and the scaling is in user mode */ if (podgov->enable && podgov->p_user && pm_runtime_active(&d->dev)) { podgov->adjustment_frequency = freq_request; podgov->adjustment_type = ADJUSTMENT_LOCAL; update_devfreq(df); } mutex_unlock(&df->lock); nvhost_module_idle(d); }
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) { struct a6xx_gmu *gmu = &a6xx_gpu->gmu; struct msm_gpu *gpu = &a6xx_gpu->base.base; if (!pm_runtime_active(gmu->dev)) return 0; /* * Force the GMU off if we detected a hang, otherwise try to shut it * down gracefully */ if (gmu->hung) a6xx_gmu_force_off(gmu); else a6xx_gmu_shutdown(gmu); /* Remove the bus vote */ icc_set_bw(gpu->icc_path, 0, 0); /* * Make sure the GX domain is off before turning off the GMU (CX) * domain. Usually the GMU does this but only if the shutdown sequence * was successful */ if (!IS_ERR_OR_NULL(gmu->gxpd)) pm_runtime_put_sync(gmu->gxpd); clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); pm_runtime_put_sync(gmu->dev); return 0; }
/*
 * System-suspend hook: put the sensor to sleep, but only when a wakeup
 * GPIO is available and the device is currently runtime-active.
 */
static int mlx90614_pm_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
	struct mlx90614_data *data = iio_priv(indio_dev);

	if (!data->wakeup_gpio || !pm_runtime_active(dev))
		return 0;

	return mlx90614_sleep(data);
}
/*
 * IRQ handler for the TWL4030 USB transceiver: re-reads the link status,
 * takes/drops a runtime-PM reference when a cable appears/disappears,
 * reports the status to musb, and notifies sysfs watchers of "vbus".
 */
static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
{
	struct twl4030_usb *twl = _twl;
	enum musb_vbus_id_status status;
	bool status_changed = false;

	status = twl4030_usb_linkstat(twl);

	mutex_lock(&twl->lock);
	/*
	 * NOTE(review): 'status >= 0' on this enum may be always-true if
	 * musb_vbus_id_status has no negative enumerators — confirm that
	 * twl4030_usb_linkstat() can actually report errors this way.
	 */
	if (status >= 0 && status != twl->linkstat) {
		/* only cable insert/remove transitions change the PM state */
		status_changed =
			cable_present(twl->linkstat) !=
			cable_present(status);
		twl->linkstat = status;
	}
	mutex_unlock(&twl->lock);

	if (status_changed) {
		/* FIXME add a set_power() method so that B-devices can
		 * configure the charger appropriately. It's not always
		 * correct to consume VBUS power, and how much current to
		 * consume is a function of the USB configuration chosen
		 * by the host.
		 *
		 * REVISIT usb_gadget_vbus_connect(...) as needed, ditto
		 * its disconnect() sibling, when changing to/from the
		 * USB_LINK_VBUS state. musb_hdrc won't care until it
		 * starts to handle softconnect right.
		 */
		if (cable_present(status)) {
			/* cable plugged: keep the device powered */
			pm_runtime_get_sync(twl->dev);
		} else {
			/* cable gone: allow autosuspend after the delay */
			pm_runtime_mark_last_busy(twl->dev);
			pm_runtime_put_autosuspend(twl->dev);
		}
		musb_mailbox(status);
	}

	/* don't schedule during sleep - irq works right then */
	if (status == MUSB_ID_GROUND && pm_runtime_active(twl->dev)) {
		cancel_delayed_work(&twl->id_workaround_work);
		schedule_delayed_work(&twl->id_workaround_work, HZ);
	}

	if (irq)
		sysfs_notify(&twl->dev->kobj, NULL, "vbus");

	return IRQ_HANDLED;
}
static void podgov_enable(struct device *dev, int enable) { struct platform_device *d = to_platform_device(dev); struct nvhost_device_data *pdata = platform_get_drvdata(d); struct devfreq *df = pdata->power_manager; struct podgov_info_rec *podgov; if (!df) return; /* make sure the device is alive before doing any scaling */ nvhost_module_busy_noresume(d); mutex_lock(&df->lock); podgov = df->data; trace_podgov_enabled(enable); /* bad configuration. quit. */ if (df->min_freq == df->max_freq) goto exit_unlock; /* store the enable information */ podgov->enable = enable; /* skip local adjustment if we are enabling or the device is * suspended */ if (enable || !pm_runtime_active(&d->dev)) goto exit_unlock; /* full speed */ podgov->adjustment_frequency = df->max_freq; podgov->adjustment_type = ADJUSTMENT_LOCAL; update_devfreq(df); mutex_unlock(&df->lock); nvhost_module_idle(d); stop_podgov_workers(podgov); return; exit_unlock: mutex_unlock(&df->lock); nvhost_module_idle(d); }
/*
 * DAI suspend hook: stop any active streams, then release every
 * outstanding runtime-PM reference, counting them so resume can
 * reacquire the same number.
 */
static int omap_mcpdm_suspend(struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);

	if (dai->active) {
		omap_mcpdm_stop(mcpdm);
		omap_mcpdm_close_streams(mcpdm);
	}

	/* drop references until the device is no longer runtime-active */
	for (mcpdm->pm_active_count = 0;
	     pm_runtime_active(mcpdm->dev);
	     mcpdm->pm_active_count++)
		pm_runtime_put_sync(mcpdm->dev);

	return 0;
}
static void podgov_set_user_ctl(struct device *dev, int user) { struct platform_device *d = to_platform_device(dev); struct nvhost_device_data *pdata = platform_get_drvdata(d); struct devfreq *df = pdata->power_manager; struct podgov_info_rec *podgov; int old_user; if (!df) return; /* make sure the device is alive before doing any scaling */ nvhost_module_busy_noresume(d); mutex_lock(&df->lock); podgov = df->data; trace_podgov_set_user_ctl(user); /* store the new user value */ old_user = podgov->p_user; podgov->p_user = user; /* skip scaling, if scaling (or the whole device) is turned off * - or the scaling already was in user mode */ if (!pm_runtime_active(&d->dev) || !podgov->enable || !(user && !old_user)) goto exit_unlock; /* write request */ podgov->adjustment_frequency = podgov->p_freq_request; podgov->adjustment_type = ADJUSTMENT_LOCAL; update_devfreq(df); mutex_unlock(&df->lock); nvhost_module_idle(d); stop_podgov_workers(podgov); return; exit_unlock: mutex_unlock(&df->lock); nvhost_module_idle(d); }
/* Log an event related to DECON into the DECON event ring. */
static inline void disp_ss_event_log_decon
	(disp_ss_event_t type, struct v4l2_subdev *sd, ktime_t time)
{
	struct decon_device *decon = container_of(sd, struct decon_device, sd);
	/*
	 * Take the modulo on an unsigned value: once the 32-bit atomic
	 * counter wraps past INT_MAX, a signed '%' would produce a negative
	 * index and access outside disp_ss_log[].
	 */
	int idx = (int)((unsigned int)atomic_inc_return(&decon->disp_ss_log_idx) %
			DISP_EVENT_LOG_MAX);
	struct disp_ss_log *log = &decon->disp_ss_log[idx];

	/* Use the caller-supplied timestamp when one was given. */
	if (time.tv64)
		log->time = time;
	else
		log->time = ktime_get();
	log->type = type;

	switch (type) {
	case DISP_EVT_DECON_SUSPEND:
	case DISP_EVT_DECON_RESUME:
	case DISP_EVT_ENTER_LPD:
	case DISP_EVT_EXIT_LPD:
		/* PM events record runtime-PM state and elapsed time. */
		log->data.pm.pm_status = pm_runtime_active(decon->dev);
		log->data.pm.elapsed = ktime_sub(ktime_get(), log->time);
		break;
	case DISP_EVT_WB_SET_BUFFER:
	case DISP_EVT_WB_SW_TRIGGER:
	case DISP_EVT_WB_TIMELINE_INC:
	case DISP_EVT_WB_FRAME_DONE:
		/* Writeback events snapshot the sync timeline position. */
		log->data.frame.timeline = decon->wb_timeline->value;
		log->data.frame.timeline_max = decon->wb_timeline_max;
		break;
	case DISP_EVT_TE_INTERRUPT:
	case DISP_EVT_UNDERRUN:
	case DISP_EVT_LINECNT_ZERO:
		break;
	default:
		/* Any remaining types log just time and type. */
		break;
	}
}
static inline bool ina3221_is_enabled(struct ina3221_data *ina, int channel) { return pm_runtime_active(ina->pm_dev) && (ina->reg_config & INA3221_CONFIG_CHx_EN(channel)); }
/*
 * Throughput-hint notifier callback: folds the latest hint into an
 * exponential moving average and, based on the averaged hint and idle
 * estimate, moves the device one step up or down the frequency list.
 * Scaling is rate-limited via podgov->block, which is reset to p_smooth
 * whenever a frequency change is actually applied.
 */
static int nvhost_scale3d_set_throughput_hint(struct notifier_block *nb,
					      unsigned long action, void *data)
{
	struct podgov_info_rec *podgov =
		container_of(nb, struct podgov_info_rec,
			     throughput_hint_notifier);
	struct devfreq *df;
	struct platform_device *pdev;
	int hint = tegra_throughput_get_hint();
	long idle;
	long curr, target;
	int avg_idle, avg_hint, scale_score;
	unsigned int smooth;

	if (!podgov)
		return NOTIFY_DONE;
	df = podgov->power_manager;
	if (!df)
		return NOTIFY_DONE;

	pdev = to_platform_device(df->dev.parent);

	/* make sure the device is alive before doing any scaling */
	nvhost_module_busy_noresume(pdev);
	if (!pm_runtime_active(&pdev->dev)) {
		/* device suspended: drop the reference and bail out
		 * (0 == NOTIFY_DONE) */
		nvhost_module_idle(pdev);
		return 0;
	}

	mutex_lock(&podgov->power_manager->lock);

	/* count down the rate-limit window on every hint */
	podgov->block--;

	if (!podgov->enable ||
	    !podgov->p_use_throughput_hint ||
	    podgov->block > 0)
		goto exit_unlock;

	trace_podgov_hint(podgov->idle, hint);
	podgov->last_throughput_hint = ktime_get();

	curr = podgov->power_manager->previous_freq;
	idle = podgov->idle;
	avg_idle = podgov->idle_avg;
	smooth = podgov->p_smooth;

	/* compute averages using exponential moving average */
	avg_hint = ((smooth*podgov->hint_avg + hint)/(smooth+1));
	podgov->hint_avg = avg_hint;

	/* pick the target frequency from avg_hint and avg_idle */
	target = curr;
	if (avg_hint < podgov->p_hint_lo_limit) {
		/* throughput too low: step up one frequency */
		target = freqlist_up(podgov, curr, 1);
	} else {
		scale_score = avg_idle + avg_hint;
		if (scale_score > podgov->p_scaledown_limit)
			target = freqlist_down(podgov, curr, 1);
		else if (scale_score < podgov->p_scaleup_limit &&
			 hint < podgov->p_hint_hi_limit)
			target = freqlist_up(podgov, curr, 1);
	}

	/* clamp and apply target */
	scaling_limit(df, &target);
	if (target != curr) {
		/* restart the rate-limit window after an actual change */
		podgov->block = podgov->p_smooth;
		trace_podgov_do_scale(df->previous_freq, target);
		podgov->adjustment_frequency = target;
		podgov->adjustment_type = ADJUSTMENT_LOCAL;
		update_devfreq(df);
	}

	trace_podgov_print_target(idle, avg_idle, curr / 1000000,
				  target, hint, avg_hint);

exit_unlock:
	mutex_unlock(&podgov->power_manager->lock);
	nvhost_module_idle(pdev);
	return NOTIFY_OK;
}
static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; int cmd_idx; int ret; IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), "Command %s: a command is already active!\n", cmd_str)) return -EIO; IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { ret = wait_event_timeout(trans_pcie->d0i3_waitq, pm_runtime_active(&trans_pcie->pci_dev->dev), msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); if (!ret) { IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); return -ETIMEDOUT; } } cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { ret = cmd_idx; clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", cmd_str, ret); return ret; } ret = wait_event_timeout(trans_pcie->wait_command_queue, !test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status), HOST_COMPLETE_TIMEOUT); if (!ret) { IWL_ERR(trans, "Error sending %s: time out after %dms.\n", cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", txq->read_ptr, txq->write_ptr); clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", cmd_str); ret = -ETIMEDOUT; iwl_force_nmi(trans); iwl_trans_fw_error(trans); goto cancel; } if (test_bit(STATUS_FW_ERROR, &trans->status)) { IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); dump_stack(); ret = -EIO; goto cancel; } if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... 
no rsp\n"); ret = -ERFKILL; goto cancel; } if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); ret = -EIO; goto cancel; } return 0; cancel: if (cmd->flags & CMD_WANT_SKB) { /* * Cancel the CMD_WANT_SKB flag for the cmd in the * TX cmd queue. Otherwise in case the cmd comes * in later, it will possibly set an invalid * address (cmd->meta.source). */ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; } if (cmd->resp_pkt) { iwl_free_resp(cmd); cmd->resp_pkt = NULL; } return ret; }