/*
 * Send a set of id/value requests to the RPM and sleep until the ack
 * interrupt handler signals completion.
 *
 * Note: the sibling variant documents that the caller holds
 * <msm_rpm_mutex> — presumably the same contract applies here;
 * TODO(review): confirm against callers.
 *
 * @ctx:       request context, converted to a context bitmask
 * @sel_masks: selector bitmask array of msm_rpm_sel_mask_size words
 * @req:       array of id/value pairs to write; the interrupt handler
 *             overwrites the values with the ack-page contents
 * @count:     number of entries in @req
 *
 * Return: 0 on success, -ENOSPC if the RPM rejected the request.
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	int i;

	/* Publish this request so the ack interrupt handler can fill in
	 * the ack masks/values and complete <ack>. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* Only one request may be outstanding at a time. */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	/* Sleep until msm_rpm_process_ack_interrupt() completes <ack>. */
	wait_for_completion(&ack);

	/* Apart from a possible REJECTED bit, the acked context must be
	 * exactly the one we requested. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	/*
	 * The previous version contained a commented-out warning plus a loop
	 * whose only body was a commented-out pr_warn() terminated by ';' —
	 * it executed nothing and has been removed (dead code).
	 */
	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ?
		-ENOSPC : 0;
}
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_mutex>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	DECLARE_COMPLETION_ONSTACK(ack);
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	int i;

	/* Publish this request in the shared slot so the ack interrupt
	 * handler can write the ack masks/values back and complete <ack>. */
	msm_rpm_request_irq_mode.req = req;
	msm_rpm_request_irq_mode.count = count;
	msm_rpm_request_irq_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_irq_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_irq_mode.done = &ack;

	/* Lock order: msm_rpm_lock first, then msm_rpm_irq_lock. */
	spin_lock_irqsave(&msm_rpm_lock, flags);
	spin_lock(&msm_rpm_irq_lock);

	/* Only one request may be outstanding at a time. */
	BUG_ON(msm_rpm_request);
	msm_rpm_request = &msm_rpm_request_irq_mode;

	/* Stage every id/value pair into the RPM request page. */
	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) /* p14291_121102 */
	/* Vendor debug hook: record only the first id/value pair sent. */
	pantech_debug_rpm_log(1, req->id, req->value);
#endif
#endif
	msm_rpm_send_req_interrupt();

	spin_unlock(&msm_rpm_irq_lock);
	spin_unlock_irqrestore(&msm_rpm_lock, flags);

	/* Sleep until msm_rpm_process_ack_interrupt() completes <ack>. */
	wait_for_completion(&ack);

	/* Apart from a possible REJECTED bit, the acked context must be
	 * exactly the one requested; the acked selectors must match too. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ?
		-ENOSPC : 0;
}
/* Upon return, the <req> array will contain values from the ack page.
 *
 * Note: assumes caller has acquired <msm_rpm_lock>.
 *
 * Return value:
 *   0: success
 *   -ENOSPC: request rejected
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip, *err_chip;
	int i;

	/* Poll-mode request: done == NULL tells the ack handler not to
	 * signal a completion — we busy-wait instead of sleeping. */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);

	/* Mask the ack irq so the interrupt handler cannot race the
	 * busy-wait; bail out if the chip cannot be resolved. */
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));

	/* Likewise mask the error irq; undo the ack mask on failure. */
	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	/* Drain any request already in flight before claiming the slot. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}
	msm_rpm_request = &msm_rpm_request_poll_mode;

	/* Stage every id/value pair into the RPM request page. */
	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
#if defined(CONFIG_PANTECH_DEBUG)
#if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) /* p14291_121102 */
	/* Vendor debug hook: record only the first id/value pair sent. */
	pantech_debug_rpm_log(1, req->id, req->value);
#endif
#endif
	msm_rpm_send_req_interrupt();

	/* Poll the ack registers until the request is acknowledged. */
	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	/* Restore both irqs before dropping the lock. */
	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));

	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* Apart from a possible REJECTED bit, the acked context must be
	 * exactly the one requested; the acked selectors must match too. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ?
		-ENOSPC : 0;
}
/* * Note: assumes caller has acquired <msm_rpm_irq_lock>. * * Return value: * 0: request acknowledgement * 1: notification * 2: spurious interrupt */ static int msm_rpm_process_ack_interrupt(void) { uint32_t ctx_mask_ack; uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0}; ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_CTX_0)); msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_SEL_0), sel_masks_ack, msm_rpm_sel_mask_size); if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) { struct msm_rpm_notification *n; int i; list_for_each_entry(n, &msm_rpm_notifications, list) for (i = 0; i < msm_rpm_sel_mask_size; i++) if (sel_masks_ack[i] & n->sel_masks[i]) { up(&n->sem); break; } msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_SEL_0), msm_rpm_sel_mask_size); msm_rpm_write(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0); /* Ensure the write is complete before return */ mb(); return 1; } if (msm_rpm_request) { int i; *(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack; memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack, sizeof(sel_masks_ack)); for (i = 0; i < msm_rpm_request->count; i++) msm_rpm_request->req[i].value = msm_rpm_read(MSM_RPM_PAGE_ACK, target_enum(msm_rpm_request->req[i].id)); msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_SEL_0), msm_rpm_sel_mask_size); msm_rpm_write(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0); /* Ensure the write is complete before return */ mb(); if (msm_rpm_request->done) complete_all(msm_rpm_request->done); #if defined(CONFIG_PANTECH_DEBUG) #if defined(CONFIG_PANTECH_DEBUG_RPM_LOG) //p14291_121102 pantech_debug_rpm_log(0, msm_rpm_request->req->id, msm_rpm_request->req->value); #endif #endif msm_rpm_request = NULL; return 0; } return 2; }
static int msm_rpm_process_ack_interrupt(void) { uint32_t ctx_mask_ack; uint32_t sel_masks_ack[SEL_MASK_SIZE] = {0}; ctx_mask_ack = msm_rpm_read(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_CTX_0)); msm_rpm_read_contiguous(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_SEL_0), sel_masks_ack, msm_rpm_sel_mask_size); if (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_NOTIFICATION)) { struct msm_rpm_notification *n; int i; list_for_each_entry(n, &msm_rpm_notifications, list) for (i = 0; i < msm_rpm_sel_mask_size; i++) if (sel_masks_ack[i] & n->sel_masks[i]) { up(&n->sem); break; } msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_SEL_0), msm_rpm_sel_mask_size); msm_rpm_write(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0); mb(); return 1; } if (msm_rpm_request) { int i; *(msm_rpm_request->ctx_mask_ack) = ctx_mask_ack; memcpy(msm_rpm_request->sel_masks_ack, sel_masks_ack, sizeof(sel_masks_ack)); for (i = 0; i < msm_rpm_request->count; i++) msm_rpm_request->req[i].value = msm_rpm_read(MSM_RPM_PAGE_ACK, target_enum(msm_rpm_request->req[i].id)); msm_rpm_write_contiguous_zeros(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_SEL_0), msm_rpm_sel_mask_size); msm_rpm_write(MSM_RPM_PAGE_CTRL, target_ctrl(MSM_RPM_CTRL_ACK_CTX_0), 0); mb(); if (msm_rpm_request->done) complete_all(msm_rpm_request->done); msm_rpm_request = NULL; return 0; } return 2; }
/*
 * Poll-mode variant of msm_rpm_set_exclusive(): masks the ack and error
 * interrupts and busy-waits for the acknowledgement instead of sleeping,
 * so it is usable where blocking on a completion is not possible.
 *
 * Note: the sibling variant documents that the caller holds
 * <msm_rpm_lock> — TODO(review): confirm against this variant's callers.
 *
 * @ctx:       request context, converted to a context bitmask
 * @sel_masks: selector bitmask array of msm_rpm_sel_mask_size words
 * @req:       id/value pairs to send; overwritten with ack-page values
 * @count:     number of entries in @req
 *
 * Return: 0 on success, -ENOSPC if the request was rejected or an irq
 * chip could not be resolved.
 */
static int msm_rpm_set_exclusive_noirq(int ctx,
	uint32_t *sel_masks, struct msm_rpm_iv_pair *req, int count)
{
	unsigned int irq = msm_rpm_data.irq_ack;
	unsigned long flags;
	uint32_t ctx_mask = msm_rpm_get_ctx_mask(ctx);
	uint32_t ctx_mask_ack = 0;
	uint32_t sel_masks_ack[SEL_MASK_SIZE];
	struct irq_chip *irq_chip, *err_chip;
	int i;

	/* done == NULL tells the ack handler not to signal a completion. */
	msm_rpm_request_poll_mode.req = req;
	msm_rpm_request_poll_mode.count = count;
	msm_rpm_request_poll_mode.ctx_mask_ack = &ctx_mask_ack;
	msm_rpm_request_poll_mode.sel_masks_ack = sel_masks_ack;
	msm_rpm_request_poll_mode.done = NULL;

	spin_lock_irqsave(&msm_rpm_irq_lock, flags);

	/* Mask the ack irq so the handler cannot race the busy-wait. */
	irq_chip = irq_get_chip(irq);
	if (!irq_chip) {
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	irq_chip->irq_mask(irq_get_irq_data(irq));

	/* Likewise mask the error irq; undo the ack mask on failure. */
	err_chip = irq_get_chip(msm_rpm_data.irq_err);
	if (!err_chip) {
		irq_chip->irq_unmask(irq_get_irq_data(irq));
		spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);
		return -ENOSPC;
	}
	err_chip->irq_mask(irq_get_irq_data(msm_rpm_data.irq_err));

	/* Drain any request already in flight before claiming the slot. */
	if (msm_rpm_request) {
		msm_rpm_busy_wait_for_request_completion(true);
		BUG_ON(msm_rpm_request);
	}
	msm_rpm_request = &msm_rpm_request_poll_mode;

	for (i = 0; i < count; i++) {
		BUG_ON(target_enum(req[i].id) >= MSM_RPM_ID_LAST);
		msm_rpm_write(MSM_RPM_PAGE_REQ,
			target_enum(req[i].id), req[i].value);
	}

	msm_rpm_write_contiguous(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_SEL_0),
		sel_masks, msm_rpm_sel_mask_size);
	msm_rpm_write(MSM_RPM_PAGE_CTRL,
		target_ctrl(MSM_RPM_CTRL_REQ_CTX_0), ctx_mask);

	/* Ensure RPM data is written before sending the interrupt */
	mb();
	msm_rpm_send_req_interrupt();

	/* Poll the ack registers until the request is acknowledged. */
	msm_rpm_busy_wait_for_request_completion(false);
	BUG_ON(msm_rpm_request);

	/* Restore both irqs before dropping the lock. */
	err_chip->irq_unmask(irq_get_irq_data(msm_rpm_data.irq_err));
	irq_chip->irq_unmask(irq_get_irq_data(irq));

	spin_unlock_irqrestore(&msm_rpm_irq_lock, flags);

	/* Apart from a possible REJECTED bit, the acked context must be
	 * exactly the one requested; the acked selectors must match too. */
	BUG_ON((ctx_mask_ack & ~(msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)))
		!= ctx_mask);
	BUG_ON(memcmp(sel_masks, sel_masks_ack, sizeof(sel_masks_ack)));

	/*
	 * The previous version contained a commented-out warning plus a loop
	 * whose only body was a commented-out pr_warn() terminated by ';' —
	 * it executed nothing and has been removed (dead code).
	 */
	return (ctx_mask_ack & msm_rpm_get_ctx_mask(MSM_RPM_CTX_REJECTED)) ?
		-ENOSPC : 0;
}