/*
 * Invalidate the RPM resources selected by the ids in <req> for context
 * set <ctx>, by sending an exclusive INVALIDATE request to the RPM.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid <ctx> or invalid id in <req> array
 *   -ENODEV: RPM driver not initialized.
 */
static int msm_rpm_clear_common(
	int ctx, struct msm_rpm_iv_pair *req, int count, bool noirq)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	struct msm_rpm_iv_pair r[MSM_RPM_SEL_MASK_SIZE];
	int rc;
	int i;

	if (!msm_rpm_platform) {
		/*
		 * NOTE(review): with no platform data, APQ8064 is treated as
		 * a silent no-op success while other targets get -ENODEV —
		 * confirm this asymmetry is intentional.
		 */
		if (cpu_is_apq8064())
			return 0;
		else
			return -ENODEV;
	}

	if (ctx >= MSM_RPM_CTX_SET_COUNT) {
		rc = -EINVAL;
		goto clear_common_exit;
	}

	/* Translate the caller's ids into per-register selector bits. */
	rc = msm_rpm_fill_sel_masks(sel_masks, req, count);
	if (rc)
		goto clear_common_exit;

	/*
	 * Build the INVALIDATE_0..N register writes: each register receives
	 * the selector bits accumulated for it above.
	 */
	for (i = 0; i < ARRAY_SIZE(r); i++) {
		r[i].id = MSM_RPM_ID_INVALIDATE_0 + i;
		r[i].value = sel_masks[i];
	}

	/*
	 * Reuse sel_masks for the send itself: only the INVALIDATE selector
	 * is set so the request targets the invalidate operation.
	 */
	memset(sel_masks, 0, sizeof(sel_masks));
	sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_INVALIDATE)] |=
		msm_rpm_get_sel_mask(MSM_RPM_SEL_INVALIDATE);

	if (noirq) {
		unsigned long flags;

		/* Atomic path: spinlock with IRQs disabled. */
		spin_lock_irqsave(&msm_rpm_lock, flags);
		rc = msm_rpm_set_exclusive_noirq(ctx, sel_masks, r,
			ARRAY_SIZE(r));
		spin_unlock_irqrestore(&msm_rpm_lock, flags);
		BUG_ON(rc);
	} else {
		/* Sleepable path: serialize with the driver mutex. */
		mutex_lock(&msm_rpm_mutex);
		rc = msm_rpm_set_exclusive(ctx, sel_masks, r, ARRAY_SIZE(r));
		mutex_unlock(&msm_rpm_mutex);
		BUG_ON(rc);
	}

clear_common_exit:
	return rc;
}
/*
 * Push <new_cfg> to the RPM via an exclusive NOTIFICATION request and
 * record it in <curr_cfg>, but only when the configuration changed.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_update_notification(uint32_t ctx,
	struct msm_rpm_notif_config *curr_cfg,
	struct msm_rpm_notif_config *new_cfg)
{
	uint32_t sel_masks[MSM_RPM_SEL_MASK_SIZE] = {};
	int rc;

	/* Nothing to send if the configuration is unchanged. */
	if (!memcmp(curr_cfg, new_cfg, sizeof(*new_cfg)))
		return;

	sel_masks[msm_rpm_get_sel_mask_reg(MSM_RPM_SEL_NOTIFICATION)] |=
		msm_rpm_get_sel_mask(MSM_RPM_SEL_NOTIFICATION);

	rc = msm_rpm_set_exclusive(ctx, sel_masks,
		new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
	BUG_ON(rc);

	memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
}
/*
 * OR the selector mask bits for every id in <req> into <sel_masks>.
 *
 * Note: the function does not clear the masks before filling them.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpm_fill_sel_masks(
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	uint32_t sel;
	int i;

	for (i = 0; i < count; i++) {
		sel = msm_rpm_map_id_to_sel(req[i].id);

		if (sel > MSM_RPM_SEL_LAST) {
			/*
			 * Log the offending id instead of failing silently,
			 * matching the error reporting used by the other
			 * msm_rpm_fill_sel_masks() variant in this driver.
			 */
			pr_err("%s(): RPM ID %d not defined for target\n",
				__func__, req[i].id);
			return -EINVAL;
		}

		sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
			msm_rpm_get_sel_mask(sel);
	}

	return 0;
}
/*
 * Send <new_cfg> to the RPM with an exclusive request on the target's
 * notification selector, then remember it in <curr_cfg>. Skipped entirely
 * when the new configuration equals the current one.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 */
static void msm_rpm_update_notification(uint32_t ctx,
	struct msm_rpm_notif_config *curr_cfg,
	struct msm_rpm_notif_config *new_cfg)
{
	unsigned int sel_notif = msm_rpm_data.sel_notification;
	uint32_t sel_masks[SEL_MASK_SIZE] = {};
	int rc;

	/* Unchanged configuration: no request needed. */
	if (!memcmp(curr_cfg, new_cfg, sizeof(*new_cfg)))
		return;

	sel_masks[msm_rpm_get_sel_mask_reg(sel_notif)] |=
		msm_rpm_get_sel_mask(sel_notif);

	rc = msm_rpm_set_exclusive(ctx, sel_masks,
		new_cfg->iv, ARRAY_SIZE(new_cfg->iv));
	BUG_ON(rc);

	memcpy(curr_cfg, new_cfg, sizeof(*new_cfg));
}
/*
 * Accumulate the selector bits for each id in <req> into <sel_masks>,
 * rejecting ids whose selector lies beyond the target's last selector.
 *
 * Note: the function does not clear the masks before filling them.
 *
 * Return value:
 *   0: success
 *   -EINVAL: invalid id in <req> array
 */
static int msm_rpm_fill_sel_masks(
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	int idx;

	for (idx = 0; idx < count; idx++) {
		uint32_t sel = msm_rpm_map_id_to_sel(req[idx].id);

		if (sel > msm_rpm_data.sel_last) {
			pr_err("%s(): RPM ID %d not defined for target\n",
				__func__, req[idx].id);
			return -EINVAL;
		}

		sel_masks[msm_rpm_get_sel_mask_reg(sel)] |=
			msm_rpm_get_sel_mask(sel);
	}

	return 0;
}