u32 vcd_enable_clock(struct vcd_dev_ctxt *dev_ctxt,
	struct vcd_clnt_ctxt *cctxt)
{
	u32 rc = VCD_S_SUCCESS;
	u32 set_perf_lvl;

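	/*
	 * Clocks may only be turned on from the powered-but-not-clocked
	 * state; use the caller-requested performance level, falling back
	 * to VCD_MIN_PERF_LEVEL when nothing has been requested yet.
	 */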
	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF) {
		VCD_MSG_ERROR("vcd_enable_clock(): Already in state "
			"VCD_PWRCLK_STATE_OFF\n");
		rc = VCD_ERR_FAIL;
	} else if (dev_ctxt->pwr_clk_state ==
		VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
		set_perf_lvl = dev_ctxt->reqd_perf_lvl > 0 ?
			dev_ctxt->reqd_perf_lvl : VCD_MIN_PERF_LEVEL;
		rc = vcd_set_perf_level(dev_ctxt, set_perf_lvl);
		if (!VCD_FAILED(rc)) {
			if (res_trk_enable_clocks()) {
				dev_ctxt->pwr_clk_state =
					VCD_PWRCLK_STATE_ON_CLOCKED;
			} else {
				rc = VCD_ERR_FAIL;
			}
		}

	}

	if (!VCD_FAILED(rc))
		dev_ctxt->active_clnts++;

	return rc;
}
Example #2
static u32 res_trk_enable_pwr_rail(void)
{
	mutex_lock(&resource_context.lock);
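	/* Nothing to do if a previous call already brought the clocks up. */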
	if (clock_enabled == 1) {
		mutex_unlock(&resource_context.lock);
		return true;
	}
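	/*
	 * First enable: acquire the vcodec clock handle and program an
	 * initial rate before marking the rail as enabled.
	 */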
	if (!resource_context.rail_enabled) {
		resource_context.vcodec_clk = clk_get(resource_context.device,
			"vcodec_clk");
		if (IS_ERR(resource_context.vcodec_clk)) {
			VCDRES_MSG_ERROR("%s(): vcodec_clk get failed\n",
				__func__);
			mutex_unlock(&resource_context.lock);
			return false;
		}
		/*
		 * TODO: Set clk_rate to the lowest value; currently set to
		 * the highest value during bringup.
		 */
		if (clk_set_rate(resource_context.vcodec_clk,
			vidc_clk_table[0])) {
			VCDRES_MSG_ERROR("set rate failed in power up\n");
			/* Drop the handle obtained by clk_get() above. */
			clk_put(resource_context.vcodec_clk);
			mutex_unlock(&resource_context.lock);
			return false;
		}
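		/*
		 * The lock is dropped around res_trk_enable_clocks(), which
		 * is expected to take resource_context.lock itself.
		 */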
		mutex_unlock(&resource_context.lock);
		res_trk_enable_clocks();
		mutex_lock(&resource_context.lock);
	}
	/* TODO: Power rail functions need to be added. */
	resource_context.rail_enabled = 1;
	clock_enabled = 1;
	mutex_unlock(&resource_context.lock);
	return true;
}
u32 vcd_un_gate_clock(struct vcd_dev_ctxt *dev_ctxt)
{
	u32 rc = VCD_S_SUCCESS;
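	/*
	 * Un-gating only makes sense once the clocks have been enabled at
	 * least once; if they are already running this is a no-op.
	 */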
	if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_OFF ||
		dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
		VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet\n", __func__);
		rc = VCD_ERR_FAIL;
	} else if (dev_ctxt->pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED)
		rc = VCD_S_SUCCESS;
	else if (res_trk_enable_clocks())
		dev_ctxt->pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED;
	else
		rc = VCD_ERR_FAIL;
	return rc;
}
Example #4
u32 vcd_un_gate_clock(struct vcd_dev_ctxt_type *p_dev_ctxt)
{
	u32 rc = VCD_S_SUCCESS;

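	/*
	 * As above: refuse to un-gate if the clocks were never enabled,
	 * and return early if they are already running.
	 */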
	if (p_dev_ctxt->e_pwr_clk_state == VCD_PWRCLK_STATE_OFF ||
		p_dev_ctxt->e_pwr_clk_state == VCD_PWRCLK_STATE_ON_NOTCLOCKED) {
		VCD_MSG_ERROR("%s(): Clk is Off or Not Clked yet \n", __func__);
		vcd_assert();
		return VCD_ERR_FAIL;
	}

	if (p_dev_ctxt->e_pwr_clk_state == VCD_PWRCLK_STATE_ON_CLOCKED)
		return rc;

	if (res_trk_enable_clocks())
		p_dev_ctxt->e_pwr_clk_state = VCD_PWRCLK_STATE_ON_CLOCKED;
	else
		rc = VCD_ERR_FAIL;

	return rc;
}
Example #5
u32 vcd_enable_clock(struct vcd_dev_ctxt_type *p_dev_ctxt,
	struct vcd_clnt_ctxt_type_t *p_cctxt)
{
	u32 rc = VCD_S_SUCCESS;
	u32 n_set_perf_lvl;

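	/*
	 * Variant of vcd_enable_clock() above: same flow, but
	 * vcd_set_perf_level() also takes the client context here.
	 */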
	if (p_dev_ctxt->e_pwr_clk_state == VCD_PWRCLK_STATE_OFF) {
		VCD_MSG_ERROR("vcd_enable_clock(): Already in state "
			"VCD_PWRCLK_STATE_OFF\n");
		vcd_assert();
		rc = VCD_ERR_FAIL;
	} else if (p_dev_ctxt->e_pwr_clk_state ==
		VCD_PWRCLK_STATE_ON_NOTCLOCKED) {

		n_set_perf_lvl = p_dev_ctxt->n_reqd_perf_lvl > 0 ?
			p_dev_ctxt->n_reqd_perf_lvl : VCD_MIN_PERF_LEVEL;

		rc = vcd_set_perf_level(p_dev_ctxt, n_set_perf_lvl,
			p_cctxt);

		if (!VCD_FAILED(rc)) {
			if (res_trk_enable_clocks()) {
				p_dev_ctxt->e_pwr_clk_state =
					VCD_PWRCLK_STATE_ON_CLOCKED;
			} else {
				rc = VCD_ERR_FAIL;
			}
		}

	}

	if (!VCD_FAILED(rc))
		p_dev_ctxt->n_active_clnts++;

	return rc;
}