Example #1
0
int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
{
    int rc = 0;
    mutex_lock(&bandwidth_mgr_mutex);
    isp_bandwidth_mgr.client_info[client].active = 1;
    if (isp_bandwidth_mgr.use_count++) {
        mutex_unlock(&bandwidth_mgr_mutex);
        return rc;
    }
    isp_bandwidth_mgr.bus_client =
        msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
    if (!isp_bandwidth_mgr.bus_client) {
        pr_err("%s: client register failed\n", __func__);
        mutex_unlock(&bandwidth_mgr_mutex);
        return -EINVAL;
    }

    isp_bandwidth_mgr.bus_vector_active_idx = 1;
    msm_bus_scale_client_update_request(
        isp_bandwidth_mgr.bus_client,
        isp_bandwidth_mgr.bus_vector_active_idx);

    mutex_unlock(&bandwidth_mgr_mutex);
    return 0;
}
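A minimal sketch of the matching teardown for the pattern above (a hypothetical helper, not the driver's actual deinit): it assumes the same isp_bandwidth_mgr fields and uses only the register/update/unregister calls that appear elsewhere in these examples.

void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
{
	mutex_lock(&bandwidth_mgr_mutex);
	isp_bandwidth_mgr.client_info[client].active = 0;
	if (--isp_bandwidth_mgr.use_count) {
		/* other clients still hold the bus vote */
		mutex_unlock(&bandwidth_mgr_mutex);
		return;
	}

	if (isp_bandwidth_mgr.bus_client) {
		/* drop to the idle usecase before releasing the handle */
		msm_bus_scale_client_update_request(
			isp_bandwidth_mgr.bus_client, 0);
		msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client);
		isp_bandwidth_mgr.bus_client = 0;
	}
	mutex_unlock(&bandwidth_mgr_mutex);
}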
int vpu_bus_init(struct vpu_platform_resources *res)
{
	struct vpu_bus_ctrl *ctrl;
	int rc = 0;

	if (!res || res->bus_table.count == 0)
		return -EINVAL;

	ctrl = &g_vpu_bus_ctrl;
	ctrl->btabl = &res->bus_table;
	if (ctrl->bus_client)
		return 0;

	ctrl->bus_client = msm_bus_scale_register_client(&res->bus_pdata);
	if (!ctrl->bus_client) {
		pr_err("Failed to register bus scale client\n");
		goto err_init_bus;
	}

	return rc;

err_init_bus:
	vpu_bus_deinit();
	return -EINVAL;
}
int vcap_get_bus_client_handle(struct vcap_dev *dev)
{
	struct msm_bus_scale_pdata *vcap_axi_client_pdata =
			dev->vcap_pdata->bus_client_pdata;
	dev->bus_client_handle =
			msm_bus_scale_register_client(vcap_axi_client_pdata);

	return 0;
}
/* Returns MBps of read/writes for the sampling window. */
static int mon_get_mbps(int n, u32 start_val, unsigned int us)
{
	u32 overflow, count;
	long long beats;

	count = get_l2_indirect_reg(L2PMnEVCNTR(n));
	overflow = get_l2_indirect_reg(L2PMOVSR);

	if (overflow & BIT(n))
		beats = 0xFFFFFFFF - start_val + count;
	else
		beats = count - start_val;

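	/*
	 * Convert the beat count over the sample window to MBps:
	 * bytes = beats * bytes_per_beat, window = us / USEC_PER_SEC seconds,
	 * so MBps = bytes / window / MBYTE (rounded up below).
	 */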
	beats *= USEC_PER_SEC;
	beats *= bytes_per_beat;
	do_div(beats, us);
	beats = DIV_ROUND_UP_ULL(beats, MBYTE);

	pr_debug("EV%d ov: %x, cnt: %x\n", n, overflow, count);

	return beats;
}

static void do_bw_sample(struct work_struct *work);
static DECLARE_DEFERRED_WORK(bw_sample, do_bw_sample);
static struct workqueue_struct *bw_sample_wq;

static DEFINE_MUTEX(bw_lock);
static ktime_t prev_ts;
static u32 prev_r_start_val;
static u32 prev_w_start_val;

static struct msm_bus_paths bw_levels[] = {
	BW(0), BW(200),
};
static struct msm_bus_scale_pdata bw_data = {
	.usecase = bw_levels,
	.num_usecases = ARRAY_SIZE(bw_levels),
	.name = "cpubw-krait",
	.active_only = 1,
};
static u32 bus_client;
static void compute_bw(int mbps);
static irqreturn_t mon_intr_handler(int irq, void *dev_id);

#define START_LIMIT	100 /* MBps */
static int start_monitoring(void)
{
	int mb_limit;
	int ret;

	bw_sample_wq = alloc_workqueue("cpubw-krait", WQ_HIGHPRI, 0);
	if (!bw_sample_wq) {
		pr_err("Unable to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = request_threaded_irq(MON_INT, NULL, mon_intr_handler,
			  IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_RISING,
			  "cpubw_krait", mon_intr_handler);
	if (ret) {
		pr_err("Unable to register interrupt handler\n");
		destroy_workqueue(bw_sample_wq);
		return ret;
	}

	bus_client = msm_bus_scale_register_client(&bw_data);
	if (!bus_client) {
		pr_err("Unable to register bus client\n");
		ret = -ENODEV;
		goto bus_reg_fail;
	}

	compute_bw(START_LIMIT);

	mon_init();
	mon_disable(0);
	mon_disable(1);

	mb_limit = mult_frac(START_LIMIT, sample_ms, MSEC_PER_SEC);
	mb_limit /= 2;
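	/*
	 * Example (hypothetical numbers): with START_LIMIT = 100 MBps and
	 * sample_ms = 50, mult_frac gives 100 * 50 / 1000 = 5 MB for the
	 * window, halved to 2 MB for each of the read and write counters.
	 */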

	prev_r_start_val = mon_set_limit_mbyte(0, mb_limit);
	prev_w_start_val = mon_set_limit_mbyte(1, mb_limit);

	prev_ts = ktime_get();

	set_l2_indirect_reg(L2PMINTENSET, BIT(0));
	set_l2_indirect_reg(L2PMINTENSET, BIT(1));
	mon_enable(0);
	mon_enable(1);
	global_mon_enable(true);

	queue_delayed_work(bw_sample_wq, &bw_sample,
				msecs_to_jiffies(sample_ms));

	return 0;

bus_reg_fail:
	destroy_workqueue(bw_sample_wq);
	disable_irq(MON_INT);
	free_irq(MON_INT, mon_intr_handler);
	return ret;
}
Example #5
0
void msm_camio_set_perf_lvl(enum msm_bus_perf_setting perf_setting)
{
	static uint32_t bus_perf_client;
	int rc = 0;
	switch (perf_setting) {
	case S_INIT:
		bus_perf_client =
			msm_bus_scale_register_client(&cam_bus_client_pdata);
		if (!bus_perf_client) {
			pr_err("%s: Registration Failed!!!\n", __func__);
			bus_perf_client = 0;
			return;
		}
		pr_info("%s: S_INIT rc = %u\n", __func__, bus_perf_client);
		break;
	case S_EXIT:
		if (bus_perf_client) {
			pr_info("%s: S_EXIT\n", __func__);
			msm_bus_scale_unregister_client(bus_perf_client);
		} else
			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_PREVIEW:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 1);
			pr_info("%s: S_PREVIEW rc = %d\n", __func__, rc);
		} else
			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_VIDEO:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 2);
			pr_info("%s: S_VIDEO rc = %d\n", __func__, rc);
		} else
			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_CAPTURE:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 3);
			pr_info("%s: S_CAPTURE rc = %d\n", __func__, rc);
		} else
			pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_DEFAULT:
		break;
	default:
		pr_info("%s: INVALID CASE\n", __func__);
	}
}
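A hypothetical call sequence for the helper above, only to illustrate the intended ordering of the settings it handles:

/* hypothetical caller, for illustration only */
static void example_camera_session(void)
{
	msm_camio_set_perf_lvl(S_INIT);     /* register the bus client    */
	msm_camio_set_perf_lvl(S_PREVIEW);  /* vote usecase 1 for preview */
	msm_camio_set_perf_lvl(S_VIDEO);    /* vote usecase 2 for video   */
	msm_camio_set_perf_lvl(S_EXIT);     /* unregister the bus client  */
}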
Example #6
0
static int __init scm_pas_init(void)
{
	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
	if (!scm_perf_client)
		pr_warn("unable to register bus client\n");
	scm_bus_clk = clk_get_sys("scm", "bus_clk");
	if (!IS_ERR(scm_bus_clk)) {
		clk_set_rate(scm_bus_clk, 64000000);
	} else {
		scm_bus_clk = NULL;
		pr_warn("unable to get bus clock\n");
	}
	return 0;
}
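A minimal sketch of how the client registered in scm_pas_init might be used to bracket a secure-world call; the helper names and the assumption that usecase 1 in scm_pas_bus_pdata is the active vector are illustrative, not taken from the source.

static void scm_pas_enable_bw(void)
{
	/* sketch: vote the active usecase before loading a PAS image */
	if (scm_perf_client)
		msm_bus_scale_client_update_request(scm_perf_client, 1);
	if (scm_bus_clk)
		clk_prepare_enable(scm_bus_clk);
}

static void scm_pas_disable_bw(void)
{
	/* sketch: drop back to the idle usecase afterwards */
	if (scm_bus_clk)
		clk_disable_unprepare(scm_bus_clk);
	if (scm_perf_client)
		msm_bus_scale_client_update_request(scm_perf_client, 0);
}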
static int cpubw_krait_init(void)
{
	bw_sample_wq = alloc_workqueue("cpubw-krait", WQ_HIGHPRI, 0);
	if (!bw_sample_wq)
		return -ENOMEM;

	bus_client = msm_bus_scale_register_client(&bw_data);
	if (!bus_client) {
		pr_err("Unable to register bus client\n");
		destroy_workqueue(bw_sample_wq);
		return -ENODEV;
	}

	return 0;
}
static int of_read_bus_pdata(struct platform_device *pdev,
			     struct pil_tz_data *d)
{
	struct msm_bus_scale_pdata *pdata;
	pdata = msm_bus_cl_get_pdata(pdev);

	if (!pdata)
		return -EINVAL;

	d->bus_client = msm_bus_scale_register_client(pdata);
	if (!d->bus_client)
		return -EINVAL;

	return 0;
}
Example #9
0
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct drm_device *dev = gpu->dev;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(dev->dev, "could not find pdata\n");
		return;
	}

	if (pdata->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}
static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
{
	struct drm_device *dev = mdp4_lcdc_encoder->base.dev;
	struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");

	if (!lcdc_pdata) {
		dev_err(dev->dev, "could not find lvds pdata\n");
		return;
	}

	if (lcdc_pdata->bus_scale_table) {
		mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client(
				lcdc_pdata->bus_scale_table);
		DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc);
	}
}
u32 res_trk_power_up(void)
{
	VCDRES_MSG_LOW("clk_regime_rail_enable");
	VCDRES_MSG_LOW("clk_regime_sel_rail_control");
#ifdef CONFIG_MSM_BUS_SCALING
	resource_context.pcl = msm_bus_scale_register_client(
		&vidc_bus_client_pdata);
	VCDRES_MSG_LOW("%s(), resource_context.pcl = %x", __func__,
		 resource_context.pcl);
	if (resource_context.pcl == 0) {
		dev_err(resource_context.device,
			"register bus client returned NULL\n");
		return false;
	}
#endif
	return res_trk_vidc_pwr_up();
}
Example #12
0
static int __init scm_pas_init(void)
{
	/* TODO: Remove once bus scaling driver is in place */
	if (!cpu_is_apq8064())
		scm_perf_client = msm_bus_scale_register_client(
				&scm_pas_bus_pdata);
	if (!scm_perf_client)
		pr_warn("unable to register bus client\n");
	scm_bus_clk = clk_get_sys("scm", "bus_clk");
	if (!IS_ERR(scm_bus_clk)) {
		clk_set_rate(scm_bus_clk, 64000000);
	} else {
		scm_bus_clk = NULL;
		pr_warn("unable to get bus clock\n");
	}
	return 0;
}
Example #13
0
/* not ironically named at all.. no, really.. */
static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
{
	struct drm_device *dev = mdp4_dtv_encoder->base.dev;
	struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");

	if (!dtv_pdata) {
		dev_err(dev->dev, "could not find dtv pdata\n");
		return;
	}

	if (dtv_pdata->bus_scale_table) {
		mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
				dtv_pdata->bus_scale_table);
		DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
		DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
		if (dtv_pdata->lcdc_power_save)
			dtv_pdata->lcdc_power_save(1);
	}
}
static int __init scm_pas_init(void)
{
	if (cpu_is_msm8974()) {
		scm_pas_bw_tbl[0].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
		scm_pas_bw_tbl[1].vectors[0].src = MSM_BUS_MASTER_CRYPTO_CORE0;
	} else {
		scm_bus_clk = clk_get_sys("scm", "bus_clk");
		if (!IS_ERR(scm_bus_clk)) {
			clk_set_rate(scm_bus_clk, 64000000);
		} else {
			scm_bus_clk = NULL;
			pr_warn("unable to get bus clock\n");
		}
	}

	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
	if (!scm_perf_client)
		pr_warn("unable to register bus client\n");

	return 0;
}
Example #15
0
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
	if (!mdata->bus_hdl) {
		struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
		int i;

		for (i = 0; i < bus_pdata->num_usecases; i++) {
			mdp_bus_usecases[i].num_paths = 1;
			mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
		}

		mdata->bus_hdl = msm_bus_scale_register_client(bus_pdata);
		if (!mdata->bus_hdl) {
			pr_err("not able to get bus scale\n");
			return -ENOMEM;
		}

		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
	}
	return 0;
}
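For context, a minimal sketch of how the handle registered above is typically driven. The helper name, the choice of usecase index 1 as the active vote, and the ab/ib vector fields are assumptions here, not taken from the driver.

static int mdss_mdp_bus_scale_set_quota_sketch(struct mdss_data_type *mdata,
					       u64 ab, u64 ib)
{
	if (!mdata->bus_hdl)
		return -EINVAL;

	/* assumed layout: mdp_bus_vectors[1] backs the active usecase */
	mdp_bus_vectors[1].ab = ab;	/* average bandwidth vote, bytes/s */
	mdp_bus_vectors[1].ib = ib;	/* instantaneous bandwidth vote    */

	return msm_bus_scale_client_update_request(mdata->bus_hdl, 1);
}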
Example #16
0
static int __get_bus_vote_client(struct platform_device *pdev,
				  struct msm_iommu_drvdata *drvdata)
{
	int ret = 0;
	struct msm_bus_scale_pdata *bs_table;
	const char *dummy;

	/* Check whether bus scaling has been specified for this node */
	ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
				      &dummy);
	if (ret)
		return 0;

	bs_table = msm_bus_cl_get_pdata(pdev);

	if (bs_table) {
		drvdata->bus_client = msm_bus_scale_register_client(bs_table);
		if (!drvdata->bus_client) {
			pr_err("%s(): Bus client register failed.\n", __func__);
			ret = -EINVAL;
		}
	}
	return ret;
}
int msm_jpeg_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int jpeg_irq;
	struct resource *jpeg_mem, *vbif_mem, *jpeg_io, *jpeg_irq_res;
	void *jpeg_base;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	pgmn_dev->state = MSM_JPEG_IDLE;

	jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!jpeg_mem) {
		JPEG_PR_ERR("%s: jpeg no mem resource?\n", __func__);
		return -ENODEV;
	}

	vbif_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!vbif_mem) {
		JPEG_PR_ERR("%s: vbif no mem resource?\n", __func__);
		return -ENODEV;
	}

	jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!jpeg_irq_res) {
		JPEG_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	jpeg_irq = jpeg_irq_res->start;
	JPEG_DBG("%s base address: 0x%x, jpeg irq number: %d\n", __func__,
		jpeg_mem->start, jpeg_irq);

	pgmn_dev->jpeg_bus_client =
		msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
	if (!pgmn_dev->jpeg_bus_client) {
		JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
		pgmn_dev->jpeg_bus_client = 0;
		return -EINVAL;
	}
	msm_bus_scale_client_update_request(
		pgmn_dev->jpeg_bus_client, 1);

	jpeg_io = request_mem_region(jpeg_mem->start,
		resource_size(jpeg_mem), pdev->name);
	if (!jpeg_io) {
		JPEG_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
	if (!jpeg_base) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_remap;
	}

	pgmn_dev->jpeg_fs = regulator_get(&pgmn_dev->pdev->dev, "vdd");
	rc = regulator_enable(pgmn_dev->jpeg_fs);
	if (rc) {
		JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
				__func__, __LINE__);
		goto fail_fs;
	}

	if (msm_jpeg_get_clk_info(pgmn_dev, pgmn_dev->pdev) < 0) {
		JPEG_PR_ERR("%s:%d]jpeg clock get failed\n",
				__func__, __LINE__);
		goto fail_fs;
	}

	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	 pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 1);
	if (rc < 0) {
		JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail_clk;
	}

	pgmn_dev->hw_version = readl_relaxed(jpeg_base +
		JPEG_HW_VERSION);
	JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
		pgmn_dev->hw_version);

	pgmn_dev->jpeg_vbif = ioremap(vbif_mem->start, resource_size(vbif_mem));
	if (!pgmn_dev->jpeg_vbif) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_vbif;
	}

	JPEG_DBG("%s:%d] jpeg_vbif 0x%x", __func__, __LINE__,
		(uint32_t)pgmn_dev->jpeg_vbif);

	rc = msm_jpeg_attach_iommu(pgmn_dev);
	if (rc < 0)
		goto fail_iommu;

	set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);

	if (pgmn_dev->hw_version == JPEG_8939) {
		writel_relaxed(0x0000550e,
				jpeg_base + JPEG_FE_QOS_CFG);
		writel_relaxed(0x00005555,
				jpeg_base + JPEG_WE_QOS_CFG);
	}

	rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING, "jpeg",
		context);
	if (rc) {
		JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
			jpeg_irq);
		goto fail_request_irq;
	}

	*mem  = jpeg_mem;
	*base = jpeg_base;
	*irq  = jpeg_irq;

	pgmn_dev->jpeg_client = msm_ion_client_create(-1, "camera/jpeg");
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);

	pgmn_dev->state = MSM_JPEG_INIT;
	return rc;

fail_request_irq:
	msm_jpeg_detach_iommu(pgmn_dev);

fail_iommu:
	iounmap(pgmn_dev->jpeg_vbif);


fail_vbif:
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 0);

fail_clk:
	if (regulator_disable(pgmn_dev->jpeg_fs))
		JPEG_PR_ERR("%s:%d] regulator disable failed",
			__func__, __LINE__);
	else
		regulator_put(pgmn_dev->jpeg_fs);
	pgmn_dev->jpeg_fs = NULL;

fail_fs:
	iounmap(jpeg_base);

fail_remap:
	release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
	JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
	int i, result = 0;
	struct clk *clk;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
						pdata_dev->clk.name.clk,
						pdata_dev->clk.name.pclk,
						pdata_dev->imem_clk_name.clk,
						pdata_dev->imem_clk_name.pclk};

	/* acquire clocks */
	for (i = 1; i < KGSL_MAX_CLKS; i++) {
		if (clk_names[i]) {
			clk = clk_get(&pdev->dev, clk_names[i]);
			if (IS_ERR(clk))
				goto clk_err;
			pwr->grp_clks[i] = clk;
		}
	}
	/* Make sure we have a source clk for freq setting */
	clk = clk_get(&pdev->dev, clk_names[0]);
	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;

	/* put the AXI bus into asynchronous mode with the graphics cores */
	if (pdata_pwr->set_grp_async != NULL)
		pdata_pwr->set_grp_async();

	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
					 pdata_pwr->num_levels);
		result = -EINVAL;
		goto done;
	}
	pwr->num_pwrlevels = pdata_pwr->num_levels;
	pwr->active_pwrlevel = pdata_pwr->init_level;
	for (i = 0; i < pdata_pwr->num_levels; i++) {
		pwr->pwrlevels[i].gpu_freq =
		(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
		clk_round_rate(pwr->grp_clks[0],
					   pdata_pwr->pwrlevel[i].
					   gpu_freq) : 0;
		pwr->pwrlevels[i].bus_freq =
			pdata_pwr->pwrlevel[i].bus_freq;
	}
	/* Do not set_rate for targets in sync with AXI */
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0], pwr->
				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);

	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
	if (IS_ERR(pwr->gpu_reg))
		pwr->gpu_reg = NULL;

	pwr->power_flags = 0;

	pwr->nap_allowed = pdata_pwr->nap_allowed;
	pwr->interval_timeout = pdata_pwr->idle_timeout;
	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
	if (IS_ERR(pwr->ebi1_clk))
		pwr->ebi1_clk = NULL;
	else
		clk_set_rate(pwr->ebi1_clk,
					 pwr->pwrlevels[pwr->active_pwrlevel].
						bus_freq);
	if (pdata_dev->clk.bus_scale_table != NULL) {
		pwr->pcl =
			msm_bus_scale_register_client(pdata_dev->clk.
							bus_scale_table);
		if (!pwr->pcl) {
			KGSL_PWR_ERR(device,
					"msm_bus_scale_register_client failed: "
					"id %d table %p", device->id,
					pdata_dev->clk.bus_scale_table);
			result = -EINVAL;
			goto done;
		}
	}

	/* acquire interrupt */
	pwr->interrupt_num =
		platform_get_irq_byname(pdev, pwr->irq_name);

	if (pwr->interrupt_num <= 0) {
		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
					 pwr->interrupt_num);
		result = -EINVAL;
		goto done;
	}

	register_early_suspend(&device->display_off);
	return result;

clk_err:
	result = PTR_ERR(clk);
	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
				 clk_names[i], result);

done:
	return result;
}
void msm_camio_bus_scale_cfg(struct msm_bus_scale_pdata *cam_bus_scale_table,
		enum msm_bus_perf_setting perf_setting)
{
	static uint32_t bus_perf_client;
	int rc = 0;
	switch (perf_setting) {
	case S_INIT:
		bus_perf_client =
			msm_bus_scale_register_client(cam_bus_scale_table);
		if (!bus_perf_client) {
			CDBG("%s: Registration Failed!!!\n", __func__);
			bus_perf_client = 0;
			return;
		}
		CDBG("%s: S_INIT rc = %u\n", __func__, bus_perf_client);
		break;
	case S_EXIT:
		if (bus_perf_client) {
			CDBG("%s: S_EXIT\n", __func__);
			msm_bus_scale_unregister_client(bus_perf_client);
		} else
			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_PREVIEW:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 1);
			CDBG("%s: S_PREVIEW rc = %d\n", __func__, rc);
		} else
			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_VIDEO:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 2);
			CDBG("%s: S_VIDEO rc = %d\n", __func__, rc);
		} else
			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_CAPTURE:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 3);
			CDBG("%s: S_CAPTURE rc = %d\n", __func__, rc);
		} else
			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_ZSL:
		if (bus_perf_client) {
			rc = msm_bus_scale_client_update_request(
				bus_perf_client, 4);
			CDBG("%s: S_ZSL rc = %d\n", __func__, rc);
		} else
			CDBG("%s: Bus Client NOT Registered!!!\n", __func__);
		break;
	case S_DEFAULT:
		break;
	default:
		pr_warning("%s: INVALID CASE\n", __func__);
	}
}
Example #20
0
static int dtv_probe(struct platform_device *pdev)
{
    struct msm_fb_data_type *mfd;
    struct fb_info *fbi;
    struct platform_device *mdp_dev = NULL;
    struct msm_fb_panel_data *pdata = NULL;
    int rc;

    if (pdev->id == 0) {
        dtv_pdata = pdev->dev.platform_data;
#ifdef CONFIG_MSM_BUS_SCALING
        if (!dtv_bus_scale_handle && dtv_pdata &&
                dtv_pdata->bus_scale_table) {
            dtv_bus_scale_handle =
                msm_bus_scale_register_client(
                    dtv_pdata->bus_scale_table);
            if (!dtv_bus_scale_handle) {
                pr_err("%s not able to get bus scale\n",
                       __func__);
            }
        }
#else
        ebi1_clk = clk_get(&pdev->dev, "mem_clk");
        if (IS_ERR(ebi1_clk)) {
            ebi1_clk = NULL;
            pr_warning("%s: Couldn't get ebi1 clock\n", __func__);
        }
#endif
        tv_src_clk = clk_get(&pdev->dev, "src_clk");
        if (IS_ERR(tv_src_clk)) {
            pr_err("error: can't get tv_src_clk!\n");
            return PTR_ERR(tv_src_clk);
        }

        hdmi_clk = clk_get(&pdev->dev, "hdmi_clk");
        if (IS_ERR(hdmi_clk)) {
            pr_err("error: can't get hdmi_clk!\n");
            return PTR_ERR(hdmi_clk);
        }

        mdp_tv_clk = clk_get(&pdev->dev, "mdp_clk");
        if (IS_ERR(mdp_tv_clk))
            mdp_tv_clk = NULL;

        return 0;
    }

    mfd = platform_get_drvdata(pdev);

    if (!mfd)
        return -ENODEV;

    if (mfd->key != MFD_KEY)
        return -EINVAL;

    if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
        return -ENOMEM;

    mdp_dev = platform_device_alloc("mdp", pdev->id);
    if (!mdp_dev)
        return -ENOMEM;

    /*
     * link to the latest pdev
     */
    mfd->pdev = mdp_dev;
    mfd->dest = DISPLAY_LCDC;

    /*
     * alloc panel device data
     */
    if (platform_device_add_data
            (mdp_dev, pdev->dev.platform_data,
             sizeof(struct msm_fb_panel_data))) {
        pr_err("dtv_probe: platform_device_add_data failed!\n");
        platform_device_put(mdp_dev);
        return -ENOMEM;
    }
    /*
     * data chain
     */
    pdata = (struct msm_fb_panel_data *)mdp_dev->dev.platform_data;
    pdata->on = dtv_on;
    pdata->off = dtv_off;
    pdata->next = pdev;

    /*
     * get/set panel specific fb info
     */
    mfd->panel_info = pdata->panel_info;
    if (hdmi_prim_display)
        mfd->fb_imgType = MSMFB_DEFAULT_TYPE;
    else
        mfd->fb_imgType = MDP_RGB_565;

    fbi = mfd->fbi;
    fbi->var.pixclock = mfd->panel_info.clk_rate;
    fbi->var.left_margin = mfd->panel_info.lcdc.h_back_porch;
    fbi->var.right_margin = mfd->panel_info.lcdc.h_front_porch;
    fbi->var.upper_margin = mfd->panel_info.lcdc.v_back_porch;
    fbi->var.lower_margin = mfd->panel_info.lcdc.v_front_porch;
    fbi->var.hsync_len = mfd->panel_info.lcdc.h_pulse_width;
    fbi->var.vsync_len = mfd->panel_info.lcdc.v_pulse_width;

    /*
     * set driver data
     */
    platform_set_drvdata(mdp_dev, mfd);

    /*
     * register in mdp driver
     */
    rc = platform_device_add(mdp_dev);
    if (rc)
        goto dtv_probe_err;

    pm_runtime_set_active(&pdev->dev);
    pm_runtime_enable(&pdev->dev);

    pdev_list[pdev_list_cnt++] = pdev;
    return 0;

dtv_probe_err:
#ifdef CONFIG_MSM_BUS_SCALING
    if (dtv_pdata && dtv_pdata->bus_scale_table &&
            dtv_bus_scale_handle > 0)
        msm_bus_scale_unregister_client(dtv_bus_scale_handle);
#endif
    platform_device_put(mdp_dev);
    return rc;
}
int __init acpuclk_cortex_init(struct platform_device *pdev,
	struct acpuclk_drv_data *data)
{
	unsigned long max_cpu_khz = 0;
	int i, rc;

	acpuclk_init_data = data;
	mutex_init(&acpuclk_init_data->lock);

	bus_perf_client = msm_bus_scale_register_client(
		acpuclk_init_data->bus_scale);
	if (!bus_perf_client) {
		pr_err("Unable to register bus client\n");
		BUG();
	}

	for (i = 0; i < NUM_SRC; i++) {
		if (!acpuclk_init_data->src_clocks[i].name)
			continue;
		acpuclk_init_data->src_clocks[i].clk =
			clk_get(&pdev->dev,
				acpuclk_init_data->src_clocks[i].name);
		BUG_ON(IS_ERR(acpuclk_init_data->src_clocks[i].clk));
		/*
		 * Prepare the PLLs because we enable/disable them
		 * in atomic context during power collapse/restore.
		 */
		BUG_ON(clk_prepare(acpuclk_init_data->src_clocks[i].clk));
	}

	/* Improve boot time by ramping up CPU immediately */
	for (i = 0; acpuclk_init_data->freq_tbl[i].khz != 0 &&
			acpuclk_init_data->freq_tbl[i].use_for_scaling; i++)
		max_cpu_khz = acpuclk_init_data->freq_tbl[i].khz;

	/* Initialize regulators */
	rc = increase_vdd(acpuclk_init_data->freq_tbl[i].vdd_cpu,
		acpuclk_init_data->freq_tbl[i].vdd_mem);
	if (rc)
		goto err_vdd;

	rc = regulator_enable(acpuclk_init_data->vdd_mem);
	if (rc) {
		dev_err(&pdev->dev, "regulator_enable for mem failed\n");
		goto err_vdd;
	}

	rc = regulator_enable(acpuclk_init_data->vdd_cpu);
	if (rc) {
		dev_err(&pdev->dev, "regulator_enable for cpu failed\n");
		goto err_vdd_cpu;
	}

	acpuclk_cortex_set_rate(0, max_cpu_khz, SETRATE_INIT);

	acpuclk_register(&acpuclk_cortex_data);
	cpufreq_table_init();

	return 0;

err_vdd_cpu:
	regulator_disable(acpuclk_init_data->vdd_mem);
err_vdd:
	regulator_put(acpuclk_init_data->vdd_mem);
	regulator_put(acpuclk_init_data->vdd_cpu);

	for (i = 0; i < NUM_SRC; i++) {
		if (!acpuclk_init_data->src_clocks[i].name)
			continue;
		clk_unprepare(acpuclk_init_data->src_clocks[i].clk);
		clk_put(acpuclk_init_data->src_clocks[i].clk);
	}
	return rc;
}
Example #22
0
static int tvenc_probe(struct platform_device *pdev)
{
	struct msm_fb_data_type *mfd;
	struct platform_device *mdp_dev = NULL;
	struct msm_fb_panel_data *pdata = NULL;
	int rc;

	if (pdev->id == 0) {
		tvenc_base = ioremap(pdev->resource[0].start,
					pdev->resource[0].end -
					pdev->resource[0].start + 1);
		if (!tvenc_base) {
			pr_err("tvenc_base ioremap failed!\n");
			return -ENOMEM;
		}
		tvenc_pdata = pdev->dev.platform_data;
		tvenc_resource_initialized = 1;
		return 0;
	}

	if (!tvenc_resource_initialized)
		return -EPERM;

	mfd = platform_get_drvdata(pdev);

	if (!mfd)
		return -ENODEV;

	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
		return -ENOMEM;

	if (tvenc_base == NULL)
		return -ENOMEM;

	mdp_dev = platform_device_alloc("mdp", pdev->id);
	if (!mdp_dev)
		return -ENOMEM;

	/*
	 * link to the latest pdev
	 */
	mfd->pdev = mdp_dev;
	mfd->dest = DISPLAY_TV;

	/*
	 * alloc panel device data
	 */
	if (platform_device_add_data
	    (mdp_dev, pdev->dev.platform_data,
	     sizeof(struct msm_fb_panel_data))) {
		pr_err("tvenc_probe: platform_device_add_data failed!\n");
		platform_device_put(mdp_dev);
		return -ENOMEM;
	}
	/*
	 * data chain
	 */
	pdata = mdp_dev->dev.platform_data;
	pdata->on = tvenc_on;
	pdata->off = tvenc_off;
	pdata->next = pdev;

	/*
	 * get/set panel specific fb info
	 */
	mfd->panel_info = pdata->panel_info;
#ifdef CONFIG_FB_MSM_MDP40
	mfd->fb_imgType = MDP_RGB_565;  /* base layer */
#else
	mfd->fb_imgType = MDP_YCRYCB_H2V1;
#endif

#ifdef CONFIG_MSM_BUS_SCALING
	if (!tvenc_bus_scale_handle && tvenc_pdata &&
		tvenc_pdata->bus_scale_table) {
		tvenc_bus_scale_handle =
			msm_bus_scale_register_client(
				tvenc_pdata->bus_scale_table);
		if (!tvenc_bus_scale_handle) {
			printk(KERN_ERR "%s not able to get bus scale\n",
				__func__);
		}
	}
#else
	mfd->ebi1_clk = clk_get(NULL, "ebi1_tv_clk");
	if (IS_ERR(mfd->ebi1_clk)) {
		rc = PTR_ERR(mfd->ebi1_clk);
		goto tvenc_probe_err;
	}
	clk_set_rate(mfd->ebi1_clk, MSM_SYSTEM_BUS_RATE);
#endif

	/*
	 * set driver data
	 */
	platform_set_drvdata(mdp_dev, mfd);

	/*
	 * register in mdp driver
	 */
	rc = platform_device_add(mdp_dev);
	if (rc)
		goto tvenc_probe_err;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);



	pdev_list[pdev_list_cnt++] = pdev;

	return 0;

tvenc_probe_err:
#ifdef CONFIG_MSM_BUS_SCALING
	if (tvenc_pdata && tvenc_pdata->bus_scale_table &&
		tvenc_bus_scale_handle > 0) {
		msm_bus_scale_unregister_client(tvenc_bus_scale_handle);
		tvenc_bus_scale_handle = 0;
	}
#endif
	platform_device_put(mdp_dev);
	return rc;
}
static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
{
	mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
			&mdp_bus_scale_table);
	DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
}
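The matching teardown and vote helpers for the client registered above would look roughly like this (a sketch; the helper names and usecase indices are assumptions, mirroring the register/update/unregister calls used throughout these examples).

static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc)
{
	if (mdp5_cmd_enc->bsc) {
		msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc);
		mdp5_cmd_enc->bsc = 0;
	}
}

static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx)
{
	if (mdp5_cmd_enc->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
	}
}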
/*
 * Initialize all sensors ocmem driver data fields and register with the
 * ocmem driver.
 *
 * @return 0 upon success; < 0 upon error
 */
static int sns_ocmem_init(void)
{
	int i, err, ret;
	struct sns_ocmem_hdr_s addr_req_hdr;
	struct msm_bus_scale_pdata *sns_ocmem_bus_scale_pdata = NULL;

	/* register for OCMEM callback */
	sns_ctl.ocmem_handle =
		ocmem_notifier_register(SNS_OCMEM_CLIENT_ID,
		&sns_ctl.ocmem_nb);
	if (sns_ctl.ocmem_handle == NULL) {
		pr_err("OCMEM notifier registration failed\n");
		return -EFAULT;
	}

	/* populate platform data */
	ret = sns_ocmem_platform_data_populate(sns_ctl.pdev);
	if (ret) {
		dev_err(&sns_ctl.pdev->dev,
			"%s: failed to populate platform data, rc = %d\n",
			__func__, ret);
		return -ENODEV;
	}
	sns_ocmem_bus_scale_pdata = dev_get_drvdata(&sns_ctl.pdev->dev);

	sns_ctl.sns_ocmem_bus_client =
		msm_bus_scale_register_client(sns_ocmem_bus_scale_pdata);

	if (!sns_ctl.sns_ocmem_bus_client) {
		pr_err("%s: msm_bus_scale_register_client() failed\n",
			__func__);
		return -EFAULT;
	}

	/* load ADSP first */
	if (sns_load_adsp() != 0) {
		pr_err("%s: sns_load_adsp failed\n", __func__);
		return -EFAULT;
	}

	/*
	 * wait before opening the SMD channel from the kernel to ensure
	 * the channel has already been opened on the ADSP side
	 */
	msleep(1000);

	err = smd_named_open_on_edge(SNS_OCMEM_SMD_CHANNEL,
					SMD_APPS_QDSP,
					&sns_ctl.smd_ch,
					NULL,
					sns_ocmem_smd_notify_data);
	if (err != 0) {
		pr_err("%s: smd_named_open_on_edge failed %i\n", __func__, err);
		return -EFAULT;
	}

	pr_debug("%s: SMD channel openned successfuly!\n", __func__);
	/* wait for the channel ready before writing data */
	msleep(1000);
	addr_req_hdr.msg_id = SNS_OCMEM_PHYS_ADDR_REQ_V01;
	addr_req_hdr.msg_type = SNS_OCMEM_MSG_TYPE_REQ;
	addr_req_hdr.msg_size = 0;

	err = sns_ocmem_send_msg(&addr_req_hdr, NULL);
	if (err != 0) {
		pr_err("%s: sns_ocmem_send_msg failed %i\n", __func__, err);
		return -ECOMM;
	}

	err = sns_ocmem_wait(DSPS_PHYS_ADDR_SET, 0);
	if (err != 0) {
		pr_err("%s: sns_ocmem_wait failed %i\n", __func__, err);
		return -EFAULT;
	}

	sns_ctl.map_list.num_chunks = sns_ctl.mem_segments_size;
	for (i = 0; i < sns_ctl.mem_segments_size; i++) {
		sns_ctl.map_list.chunks[i].ro =
			sns_ctl.mem_segments[i].type == 1 ? true : false;
		sns_ctl.map_list.chunks[i].ddr_paddr =
			sns_ctl.mem_segments[i].start_address;
		sns_ctl.map_list.chunks[i].size =
			sns_ctl.mem_segments[i].size;

		pr_debug("%s: chunks[%d]: ro=%d, ddr_paddr=0x%lx, size=%li",
				__func__, i,
				sns_ctl.map_list.chunks[i].ro,
				sns_ctl.map_list.chunks[i].ddr_paddr,
				sns_ctl.map_list.chunks[i].size);
	}

	return 0;
}
static int broadcast_tdmb_fc8080_probe(struct spi_device *spi)
{
	int rc;

	if(spi == NULL)
	{
		printk("broadcast_fc8080_probe spi is NULL, so spi can not be set\n");
		return -1;
	}

	fc8080_ctrl_info.TdmbPowerOnState = FALSE;
	fc8080_ctrl_info.spi_ptr 				= spi;
	fc8080_ctrl_info.spi_ptr->mode 			= SPI_MODE_0;
	fc8080_ctrl_info.spi_ptr->bits_per_word 	= 8;
	fc8080_ctrl_info.spi_ptr->max_speed_hz 	= (15000*1000);
	fc8080_ctrl_info.pdev = to_platform_device(&spi->dev);

#ifdef FEATURE_DMB_USE_BUS_SCALE
	fc8080_ctrl_info.bus_scale_pdata = msm_bus_cl_get_pdata(fc8080_ctrl_info.pdev);
	fc8080_ctrl_info.bus_scale_client_id = msm_bus_scale_register_client(fc8080_ctrl_info.bus_scale_pdata);
#endif

	// Once I have a spi_device structure I can do a transfer anytime

	rc = spi_setup(spi);
	printk("broadcast_tdmb_fc8080_probe spi_setup=%d\n", rc);
	bbm_com_hostif_select(NULL, 1);

#ifdef FEATURE_DMB_USE_XO
	fc8080_ctrl_info.clk = clk_get(&fc8080_ctrl_info.spi_ptr->dev, "xo");
	if (IS_ERR(fc8080_ctrl_info.clk)) {
		rc = PTR_ERR(fc8080_ctrl_info.clk);
		dev_err(&fc8080_ctrl_info.spi_ptr->dev, "could not get clock\n");
		return rc;
	}

	/* We enable/disable the clock only to assure it works */
	rc = clk_prepare_enable(fc8080_ctrl_info.clk);
	if (rc) {
		dev_err(&fc8080_ctrl_info.spi_ptr->dev, "could not enable clock\n");
		return rc;
	}
	clk_disable_unprepare(fc8080_ctrl_info.clk);
#endif

#ifdef FEATURE_DMB_USE_WORKQUEUE
	INIT_WORK(&fc8080_ctrl_info.spi_work, broacast_tdmb_spi_work);
	fc8080_ctrl_info.spi_wq = create_singlethread_workqueue("tdmb_spi_wq");
	if(fc8080_ctrl_info.spi_wq == NULL){
		printk("Failed to setup tdmb spi workqueue \n");
		return -ENOMEM;
	}
#endif

	tdmb_configure_gpios();

#ifdef FEATURE_DMB_USE_WORKQUEUE
	rc = request_irq(spi->irq, broadcast_tdmb_spi_isr, IRQF_DISABLED | IRQF_TRIGGER_FALLING,
	                   spi->dev.driver->name, &fc8080_ctrl_info);
#else
	rc = request_threaded_irq(spi->irq, NULL, broadcast_tdmb_spi_event_handler, IRQF_ONESHOT | IRQF_DISABLED | IRQF_TRIGGER_FALLING,
	                   spi->dev.driver->name, &fc8080_ctrl_info);
#endif
	printk("broadcast_tdmb_fc8080_probe request_irq=%d\n", rc);

	tdmb_fc8080_interrupt_lock();

	mutex_init(&fc8080_ctrl_info.mutex);

	wake_lock_init(&fc8080_ctrl_info.wake_lock,  WAKE_LOCK_SUSPEND, dev_name(&spi->dev));

	spin_lock_init(&fc8080_ctrl_info.spin_lock);

#ifdef FEATURE_DMB_USE_PM_QOS
	pm_qos_add_request(&fc8080_ctrl_info.pm_req_list, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
#endif
	printk("broadcast_fc8080_probe End\n");

	return rc;
}
Example #26
0
int msm_jpeg_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int i = 0;
	int jpeg_irq;
	struct resource *jpeg_mem, *jpeg_io, *jpeg_irq_res;
	void *jpeg_base;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	pgmn_dev->state = MSM_JPEG_IDLE;

	jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!jpeg_mem) {
		JPEG_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!jpeg_irq_res) {
		JPEG_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	jpeg_irq = jpeg_irq_res->start;
	JPEG_DBG("%s base address: 0x%x, jpeg irq number: %d\n", __func__,
		jpeg_mem->start, jpeg_irq);

	pgmn_dev->jpeg_bus_client =
		msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
	if (!pgmn_dev->jpeg_bus_client) {
		JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
		pgmn_dev->jpeg_bus_client = 0;
		return -EINVAL;
	}
	msm_bus_scale_client_update_request(
		pgmn_dev->jpeg_bus_client, 1);

	jpeg_io = request_mem_region(jpeg_mem->start,
		resource_size(jpeg_mem), pdev->name);
	if (!jpeg_io) {
		JPEG_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
	if (!jpeg_base) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_remap;
	}

	pgmn_dev->jpeg_fs = regulator_get(&pgmn_dev->pdev->dev, "vdd");
	rc = regulator_enable(pgmn_dev->jpeg_fs);
	if (rc) {
		JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
				__func__, __LINE__);
		goto fail_fs;
	}

	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	 pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 1);
	if (rc < 0) {
		JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail_clk;
	}

	pgmn_dev->hw_version = readl_relaxed(jpeg_base +
		JPEG_HW_VERSION);
	JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
		pgmn_dev->hw_version);

	pgmn_dev->jpeg_vbif = ioremap(VBIF_BASE_ADDRESS, VBIF_REGION_SIZE);
	if (!pgmn_dev->jpeg_vbif) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s:%d] ioremap failed\n", __func__, __LINE__);
		goto fail_vbif;
	}
	JPEG_DBG("%s:%d] jpeg_vbif 0x%x", __func__, __LINE__,
		(uint32_t)pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		rc = iommu_attach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		if (rc < 0) {
			rc = -ENODEV;
			JPEG_PR_ERR("%s: Device attach failed\n", __func__);
			goto fail_iommu;
		}
		JPEG_DBG("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
					(uint32_t)pgmn_dev->domain,
					(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
	}
#endif
	set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MACH_LGE
	*mem  = jpeg_mem;
	*base = jpeg_base;
#endif

	rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING, "jpeg",
		context);
	if (rc) {
		JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
			jpeg_irq);
		goto fail_request_irq;
	}

#ifndef CONFIG_MACH_LGE /* QCT origin */
	*mem  = jpeg_mem;
	*base = jpeg_base;
#endif
	*irq  = jpeg_irq;

	pgmn_dev->jpeg_client = msm_ion_client_create(-1, "camera/jpeg");
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);

	pgmn_dev->state = MSM_JPEG_INIT;
	return rc;

fail_request_irq:
#ifdef CONFIG_MACH_LGE
	*mem  = NULL;
	*base = NULL;
#endif

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		JPEG_PR_ERR("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
					(uint32_t)pgmn_dev->domain,
					(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
		iommu_detach_device(pgmn_dev->domain,
					pgmn_dev->iommu_ctx_arr[i]);
	}
#endif

fail_iommu:
	iounmap(pgmn_dev->jpeg_vbif);

fail_vbif:
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);

fail_clk:
	if (regulator_disable(pgmn_dev->jpeg_fs))
		JPEG_PR_ERR("%s:%d] regulator disable failed",
			__func__, __LINE__);
	else
		regulator_put(pgmn_dev->jpeg_fs);
	pgmn_dev->jpeg_fs = NULL;

fail_fs:
	iounmap(jpeg_base);

fail_remap:
	release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
	JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
Example #27
0
static int __devinit ehci_hsic_msm_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	struct msm_hsic_hcd *mehci;
	struct msm_hsic_host_platform_data *pdata;
	int ret;

	dev_dbg(&pdev->dev, "ehci_msm-hsic probe\n");

	/* After the parent device's probe has run, the parent is put into
	 * suspend. When the child device's probe is called, the driver core
	 * does not resume the parent, so the parent would remain suspended
	 * even though the child is active. Hence resume the parent device
	 * explicitly.
	 */
	if (pdev->dev.parent)
		pm_runtime_get_sync(pdev->dev.parent);

	hcd = usb_create_hcd(&msm_hsic_driver, &pdev->dev,
				dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		return  -ENOMEM;
	}

	hcd_to_bus(hcd)->skip_resume = true;

	hcd->irq = platform_get_irq(pdev, 0);
	if (hcd->irq < 0) {
		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
		ret = hcd->irq;
		goto put_hcd;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Unable to get memory resource\n");
		ret = -ENODEV;
		goto put_hcd;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto put_hcd;
	}

	mehci = hcd_to_hsic(hcd);
	mehci->dev = &pdev->dev;
	pdata = mehci->dev->platform_data;

	mehci->ehci.susp_sof_bug = 1;
	mehci->ehci.reset_sof_bug = 1;

	mehci->ehci.resume_sof_bug = 1;

	if (pdata)
		mehci->ehci.log2_irq_thresh = pdata->log2_irq_thresh;

	res = platform_get_resource_byname(pdev,
			IORESOURCE_IRQ,
			"peripheral_status_irq");
	if (res)
		mehci->peripheral_status_irq = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_IO, "wakeup");
	if (res) {
		mehci->wakeup_gpio = res->start;
		mehci->wakeup_irq = MSM_GPIO_TO_INT(res->start);
		dev_dbg(mehci->dev, "wakeup_irq: %d\n", mehci->wakeup_irq);
	}

	ret = msm_hsic_init_clocks(mehci, 1);
	if (ret) {
		dev_err(&pdev->dev, "unable to initialize clocks\n");
		ret = -ENODEV;
		goto unmap;
	}

	ret = msm_hsic_init_vddcx(mehci, 1);
	if (ret) {
		dev_err(&pdev->dev, "unable to initialize VDDCX\n");
		ret = -ENODEV;
		goto deinit_clocks;
	}

	init_completion(&mehci->rt_completion);
	init_completion(&mehci->gpt0_completion);
	ret = msm_hsic_reset(mehci);
	if (ret) {
		dev_err(&pdev->dev, "unable to initialize PHY\n");
		goto deinit_vddcx;
	}

	ehci_wq = create_singlethread_workqueue("ehci_wq");
	if (!ehci_wq) {
		dev_err(&pdev->dev, "unable to create workqueue\n");
		ret = -ENOMEM;
		goto deinit_vddcx;
	}

	INIT_WORK(&mehci->bus_vote_w, ehci_hsic_bus_vote_w);

	ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
	if (ret) {
		dev_err(&pdev->dev, "unable to register HCD\n");
		goto unconfig_gpio;
	}

	device_init_wakeup(&pdev->dev, 1);
	wake_lock_init(&mehci->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
	wake_lock(&mehci->wlock);

	if (mehci->peripheral_status_irq) {
		ret = request_threaded_irq(mehci->peripheral_status_irq,
			NULL, hsic_peripheral_status_change,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
						| IRQF_SHARED,
			"hsic_peripheral_status", mehci);
		if (ret)
			dev_err(&pdev->dev, "%s:request_irq:%d failed:%d",
				__func__, mehci->peripheral_status_irq, ret);
	}

	/* configure wakeup irq */
	if (mehci->wakeup_irq) {
		ret = request_irq(mehci->wakeup_irq, msm_hsic_wakeup_irq,
				IRQF_TRIGGER_HIGH,
				"msm_hsic_wakeup", mehci);
		if (!ret) {
			disable_irq_nosync(mehci->wakeup_irq);
		} else {
			dev_err(&pdev->dev, "request_irq(%d) failed: %d\n",
					mehci->wakeup_irq, ret);
			mehci->wakeup_irq = 0;
		}
	}

	ret = ehci_hsic_msm_debugfs_init(mehci);
	if (ret)
		dev_dbg(&pdev->dev, "mode debugfs file is"
			"not available\n");

	if (pdata && pdata->bus_scale_table) {
		mehci->bus_perf_client =
		    msm_bus_scale_register_client(pdata->bus_scale_table);
		/* Configure BUS performance parameters for MAX bandwidth */
		if (mehci->bus_perf_client) {
			mehci->bus_vote = true;
			queue_work(ehci_wq, &mehci->bus_vote_w);
		} else {
			dev_err(&pdev->dev, "%s: Failed to register BUS "
						"scaling client!!\n", __func__);
		}
	}

	__mehci = mehci;

	/*
	 * This pdev->dev is assigned parent of root-hub by USB core,
	 * hence, runtime framework automatically calls this driver's
	 * runtime APIs based on root-hub's state.
	 */
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	/* Decrement the parent device's counter after probe.
	 * As child is active, parent will not be put into
	 * suspend mode.
	 */
	if (pdev->dev.parent)
		pm_runtime_put_sync(pdev->dev.parent);

	return 0;

unconfig_gpio:
	destroy_workqueue(ehci_wq);
	msm_hsic_config_gpios(mehci, 0);
deinit_vddcx:
	msm_hsic_init_vddcx(mehci, 0);
deinit_clocks:
	msm_hsic_init_clocks(mehci, 0);
unmap:
	iounmap(hcd->regs);
put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
static int msm_rng_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct msm_rng_device *msm_rng_dev = NULL;
	void __iomem *base = NULL;
	int error = 0;
	int ret = 0;
	struct device *dev;

	struct msm_bus_scale_pdata *qrng_platform_support = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "invalid address\n");
		error = -EFAULT;
		goto err_exit;
	}

	msm_rng_dev = kzalloc(sizeof(*msm_rng_dev), GFP_KERNEL);
	if (!msm_rng_dev) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		error = -ENOMEM;
		goto err_exit;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		error = -ENOMEM;
		goto err_iomap;
	}
	msm_rng_dev->base = base;

	/* create a handle for clock control */
	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
					"qcom,msm-rng-iface-clk")))
		msm_rng_dev->prng_clk = clk_get(&pdev->dev,
							"iface_clk");
	else
		msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(msm_rng_dev->prng_clk)) {
		dev_err(&pdev->dev, "failed to register clock source\n");
		error = -EPERM;
		goto err_clk_get;
	}

	/* save away pdev and register driver data */
	msm_rng_dev->pdev = pdev;
	platform_set_drvdata(pdev, msm_rng_dev);

	if (pdev->dev.of_node) {
		/* Register bus client */
		qrng_platform_support = msm_bus_cl_get_pdata(pdev);
		msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
						qrng_platform_support);
		msm_rng_device_info.qrng_perf_client =
					msm_rng_dev->qrng_perf_client;
		if (!msm_rng_dev->qrng_perf_client)
			pr_err("Unable to register bus client\n");
	}

	/* Enable rng h/w */
	error = msm_rng_enable_hw(msm_rng_dev);

	if (error)
		goto rollback_clk;

	/* register with hwrng framework */
	msm_rng.priv = (unsigned long) msm_rng_dev;
	error = hwrng_register(&msm_rng);
	if (error) {
		dev_err(&pdev->dev, "failed to register hwrng\n");
		error = -EPERM;
		goto rollback_clk;
	}
	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);

	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
	if (IS_ERR(msm_rng_class)) {
		pr_err("class_create failed\n");
		return PTR_ERR(msm_rng_class);
	}

	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
				NULL, "msm-rng");
	if (IS_ERR(dev)) {
		pr_err("Device create failed\n");
		error = PTR_ERR(dev);
		goto unregister_chrdev;
	}
	cdev_init(&msm_rng_cdev, &msm_rng_fops);

	return ret;

unregister_chrdev:
	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
rollback_clk:
	clk_put(msm_rng_dev->prng_clk);
err_clk_get:
	iounmap(msm_rng_dev->base);
err_iomap:
	kfree(msm_rng_dev);
err_exit:
	return error;
}
Example #29
0
static int ath6kl_hsic_probe(struct platform_device *pdev)
{
	struct ath6kl_platform_data *pdata = NULL;
	struct device *dev = &pdev->dev;
	int ret = 0;

	if (machine_is_apq8064_dma()) {
		ath6kl_dbg(ATH6KL_DBG_BOOT, "%s\n", __func__);
		previous = 0;
		ath6kl_toggle_radio(pdev->dev.platform_data, 1);
	} else {

		ath6kl_bus_scale_pdata = msm_bus_cl_get_pdata(pdev);
		bus_perf_client =
			msm_bus_scale_register_client(
				ath6kl_bus_scale_pdata);
		msm_bus_scale_client_update_request(bus_perf_client, 4);

		pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);

		if (!pdata) {
			ath6kl_err("%s: Could not allocate memory for platform data\n",
				__func__);
			return -ENOMEM;
		}

		if (ath6kl_dt_parse_vreg_info(dev, &pdata->wifi_chip_pwd,
				"qca,wifi-chip-pwd") != 0) {
			ath6kl_err("%s: parse vreg info for %s error\n",
				"chip_pwd", __func__);
			goto err;
		}

		if (ath6kl_dt_parse_vreg_info(dev, &pdata->wifi_vddpa,
				"qca,wifi-vddpa") != 0) {
			ath6kl_err("%s: parse vreg info for %s error\n",
				"vddpa", __func__);
			goto err;
		}

		if (ath6kl_dt_parse_vreg_info(dev, &pdata->wifi_vddio,
				"qca,wifi-vddio") != 0) {
			ath6kl_err("%s: parse vreg info for %s error\n",
				"vddio", __func__);
			goto err;
		}

		pdata->pdev = pdev;
		platform_set_drvdata(pdev, pdata);
		gpdata = pdata;

		if (pdata->wifi_chip_pwd != NULL) {
			ret = ath6kl_platform_power(pdata, 1);

			if (ret == 0 && ath6kl_bt_on == 0)
				ath6kl_hsic_bind(1);

			*platform_has_vreg = 1;
		}
	}

	return ret;

err:
	if (pdata != NULL)
		devm_kfree(dev, pdata);

	return -EINVAL;
}
static int __init cpubw_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct devfreq_dev_profile *p = &cpubw_profile;
	struct devfreq *df;
	u32 *data, ports[MAX_PATHS * 2];
	int ret, len, i;

	if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
		len /= sizeof(ports[0]);
		if (len % 2 || len > ARRAY_SIZE(ports)) {
			dev_err(dev, "Unexpected number of ports\n");
			return -EINVAL;
		}

		ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
						 ports, len);
		if (ret)
			return ret;

		num_paths = len / 2;
	} else {
		return -EINVAL;
	}

	for (i = 0; i < num_paths; i++) {
		bw_levels[0].vectors[i].src = ports[2 * i];
		bw_levels[0].vectors[i].dst = ports[2 * i + 1];
		bw_levels[1].vectors[i].src = ports[2 * i];
		bw_levels[1].vectors[i].dst = ports[2 * i + 1];
	}
	bw_levels[0].num_paths = num_paths;
	bw_levels[1].num_paths = num_paths;

	if (of_find_property(dev->of_node, PROP_TBL, &len)) {
		len /= sizeof(*data);
		data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		p->freq_table = devm_kzalloc(dev,
					     len * sizeof(*p->freq_table),
					     GFP_KERNEL);
		if (!p->freq_table)
			return -ENOMEM;

		ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
						 data, len);
		if (ret)
			return ret;

		for (i = 0; i < len; i++)
			p->freq_table[i] = data[i];
		p->max_state = len;
		p->initial_freq = data[len-1];
	}

	bus_client = msm_bus_scale_register_client(&bw_data);
	if (!bus_client) {
		dev_err(dev, "Unable to register bus client\n");
		return -ENODEV;
	}

	df = devfreq_add_device(dev, &cpubw_profile, "msm_cpufreq", NULL);
	if (IS_ERR(df)) {
		msm_bus_scale_unregister_client(bus_client);
		return PTR_ERR(df);
	}

	return 0;
}
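A sketch of the devfreq target callback that would pair with the profile registered above; the callback name, the assumption that freq_table entries are MBps values, the unit conversion, and the ab/ib vector fields are all illustrative assumptions, not taken from the driver.

static int cpubw_target_sketch(struct device *dev, unsigned long *freq,
			       u32 flags)
{
	int i;

	for (i = 0; i < num_paths; i++) {
		/* assumption: *freq is in MBps; scale to bytes/s */
		bw_levels[1].vectors[i].ab = (u64)*freq * 1000000;
		bw_levels[1].vectors[i].ib = (u64)*freq * 1000000;
	}

	/* vote the active usecase (index 1) with the updated vectors */
	return msm_bus_scale_client_update_request(bus_client, 1);
}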