Example #1
0
int mdss_iommu_init(void)
{
	struct iommu_domain *domain;
	int domain_idx, i;

	domain_idx = msm_register_domain(&mdp_iommu_layout);
	if (IS_ERR_VALUE(domain_idx))
		return -EINVAL;

	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	iommu_set_fault_handler(domain, mdss_iommu_fault_handler);

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
		mdp_iommu_ctx[i].ctx = msm_iommu_get_ctx(mdp_iommu_ctx[i].name);
		if (!mdp_iommu_ctx[i].ctx) {
			pr_warn("unable to get iommu ctx(%s)\n",
					mdp_iommu_ctx[i].name);
			return -EINVAL;
		}
	}
	mdss_res->iommu_domain = domain_idx;

	return 0;
}
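For context, the domain index that Example #1 stores in mdss_res->iommu_domain is normally consumed by a later attach step that binds each MDP context bank to the registered domain. The sketch below is illustrative only, not the driver's verbatim code; it assumes mdp_iommu_ctx[i].ctx holds the struct device * returned by msm_iommu_get_ctx() and uses the generic iommu_attach_device()/iommu_detach_device() API.

/*
 * Illustrative sketch (assumed, not verbatim driver code): attach every MDP
 * context bank to the domain registered by mdss_iommu_init() above.
 */
static int mdss_iommu_attach_sketch(void)
{
	struct iommu_domain *domain;
	int i, rc;

	domain = msm_get_iommu_domain(mdss_res->iommu_domain);
	if (!domain)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
		rc = iommu_attach_device(domain, mdp_iommu_ctx[i].ctx);
		if (rc) {
			pr_err("unable to attach iommu ctx(%s) rc=%d\n",
					mdp_iommu_ctx[i].name, rc);
			/* roll back any context banks already attached */
			while (--i >= 0)
				iommu_detach_device(domain, mdp_iommu_ctx[i].ctx);
			return rc;
		}
	}

	return 0;
}
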
Example #2
0
int mdss_iommu_init(struct mdss_data_type *mdata)
{
    struct msm_iova_layout layout;
    struct iommu_domain *domain;
    struct mdss_iommu_map_type *iomap;
    int i;

    if (mdata->iommu_map) {
        pr_warn("iommu already initialized\n");
        return 0;
    }

    for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
        iomap = &mdss_iommu_map[i];

        layout.client_name = iomap->client_name;
        layout.partitions = iomap->partitions;
        layout.npartitions = iomap->npartitions;
        layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);

        iomap->domain_idx = msm_register_domain(&layout);
        if (IS_ERR_VALUE(iomap->domain_idx))
            return -EINVAL;

        domain = msm_get_iommu_domain(iomap->domain_idx);
        if (!domain) {
            pr_err("unable to get iommu domain(%d)\n",
                   iomap->domain_idx);
            return -EINVAL;
        }
        iommu_set_fault_handler(domain, mdss_iommu_fault_handler, NULL);

        iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
        if (!iomap->ctx) {
            pr_warn("unable to get iommu ctx(%s)\n",
                    iomap->ctx_name);
            return -EINVAL;
        }
    }

    mdata->iommu_map = mdss_iommu_map;

    return 0;
}
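The loop in Example #2 walks a static mdss_iommu_map[] table indexed by domain. The sketch below shows what such a table could look like; the struct layout, context/client names, and partition ranges are assumptions for illustration, not the driver's actual values.

/*
 * Illustrative sketch (assumed values and layout) of the mdss_iommu_map[]
 * table consumed by the registration loop above.
 */
static struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
	[MDSS_IOMMU_DOMAIN_UNSECURE] = {
		.client_name = "mdp_ns",
		.ctx_name = "mdp_0",
		.partitions = {
			{ .start = SZ_128K, .size = SZ_1G - SZ_128K },
		},
		.npartitions = 1,
	},
	[MDSS_IOMMU_DOMAIN_SECURE] = {
		.client_name = "mdp_secure",
		.ctx_name = "mdp_1",
		.partitions = {
			{ .start = SZ_1G, .size = SZ_1G },
		},
		.npartitions = 1,
	},
};
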
static struct msm_panel_common_pdata *mdss_mdp_populate_pdata(
	struct device *dev)
{
	struct msm_panel_common_pdata *pdata;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		dev_err(dev, "could not allocate memory for pdata\n");
	return pdata;
}
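For reference, a minimal sketch of how this helper would typically be called from the platform probe path; the probe function name and surrounding logic are assumed for illustration.

/*
 * Illustrative call site (assumed, not verbatim): allocate the panel
 * platform data during probe and hand it to the platform device.
 */
static int mdss_mdp_probe_sketch(struct platform_device *pdev)
{
	struct msm_panel_common_pdata *pdata;

	pdata = mdss_mdp_populate_pdata(&pdev->dev);
	if (!pdata)
		return -ENOMEM;

	pdev->dev.platform_data = pdata;
	return 0;
}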

static u32 mdss_mdp_res_init(struct platform_device *pdev)
{
	u32 rc;

	rc = mdss_mdp_irq_clk_setup(pdev);
	if (rc)
		return rc;

	mdss_res->clk_ctrl_wq = create_singlethread_workqueue("mdp_clk_wq");
	INIT_DELAYED_WORK(&mdss_res->clk_ctrl_worker,
			  mdss_mdp_clk_ctrl_workqueue_handler);

	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
	mdss_res->rev = MDSS_MDP_REG_READ(MDSS_REG_HW_VERSION);
	mdss_res->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);

	mdss_res->smp_mb_cnt = MDSS_MDP_SMP_MMB_BLOCKS;
	mdss_res->smp_mb_size = MDSS_MDP_SMP_MMB_SIZE;
	mdss_res->pipe_type_map = mdss_mdp_pipe_type_map;
	mdss_res->mixer_type_map = mdss_mdp_mixer_type_map;

	pr_info("mdss_revision=%x\n", mdss_res->rev);
	pr_info("mdp_hw_revision=%x\n", mdss_res->mdp_rev);

	mdss_res->res_init = true;
	mdss_res->timeout = HZ/20;
	mdss_res->clk_ena = false;
	mdss_res->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
	mdss_res->suspend = false;
	mdss_res->prim_ptype = NO_PANEL;
	mdss_res->irq_ena = false;

	return 0;
}
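The delayed work queued on clk_ctrl_wq is what eventually drops the clock vote after the idle timeout configured above. A minimal sketch of such a handler, assuming it only needs to power the MDP block back off, is:

/*
 * Illustrative sketch (assumed body): the delayed work referenced by
 * clk_ctrl_worker releases the MDP clock vote once it fires.
 */
static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
{
	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
}
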
static int venus_register_domain(u32 fw_max_sz)
{
	struct msm_iova_partition venus_fw_partition = {
		.start = 0,
		.size = fw_max_sz,
	};
	struct msm_iova_layout venus_fw_layout = {
		.partitions = &venus_fw_partition,
		.npartitions = 1,
		.client_name = "pil_venus",
		.domain_flags = 0,
	};

	return msm_register_domain(&venus_fw_layout);
}

static int pil_venus_mem_setup(struct platform_device *pdev, size_t size)
{
	int domain;

	venus_data->iommu_fw_ctx = msm_iommu_get_ctx("venus_fw");
	if (!venus_data->iommu_fw_ctx) {
		dprintk(VIDC_ERR, "No iommu fw context found\n");
		return -ENODEV;
	}

	if (!venus_data->venus_domain_num) {
		size = round_up(size, SZ_4K);
		domain = venus_register_domain(size);
		if (domain < 0) {
			dprintk(VIDC_ERR,
				"Venus fw iommu domain register failed\n");
			return -ENODEV;
		}
		venus_data->iommu_fw_domain = msm_get_iommu_domain(domain);
		if (!venus_data->iommu_fw_domain) {
			dprintk(VIDC_ERR, "No iommu fw domain found\n");
			return -ENODEV;
		}
		venus_data->venus_domain_num = domain;
		venus_data->fw_sz = size;
	}
	return 0;
}
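Once the context and domain are resolved, the firmware region still has to be attached and mapped before the image can be fetched through the IOMMU. The sketch below illustrates that step using the generic IOMMU API; the function name, the IOVA of 0, and the use of iommu_attach_device()/iommu_map() instead of any MSM-specific mapping helper are assumptions.

/*
 * Illustrative sketch (assumed, not the driver's exact code): attach the
 * "venus_fw" context to the registered domain and map the firmware's
 * physical region at IOVA 0.
 */
static int pil_venus_map_fw_sketch(phys_addr_t fw_phys)
{
	int rc;

	rc = iommu_attach_device(venus_data->iommu_fw_domain,
				 venus_data->iommu_fw_ctx);
	if (rc) {
		dprintk(VIDC_ERR, "venus fw iommu attach failed (%d)\n", rc);
		return rc;
	}

	rc = iommu_map(venus_data->iommu_fw_domain, 0, fw_phys,
		       venus_data->fw_sz, IOMMU_READ | IOMMU_WRITE);
	if (rc) {
		dprintk(VIDC_ERR, "venus fw iommu map failed (%d)\n", rc);
		iommu_detach_device(venus_data->iommu_fw_domain,
				    venus_data->iommu_fw_ctx);
	}
	return rc;
}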

static int pil_venus_auth_and_reset(struct platform_device *pdev)
{
	int rc;
	phys_addr_t fw_bias = venus_data->resources->firmware_base;
	void __iomem *reg_base = venus_data->reg_base;
	u32 ver;
	bool iommu_present = is_iommu_present(venus_data->resources);

	if (!fw_bias) {
		dprintk(VIDC_ERR, "FW bias is not valid\n");
		return -EINVAL;
	}
	/* Get Venus version number */
	if (!venus_data->hw_ver_checked) {
		ver = readl_relaxed(reg_base + VIDC_WRAPPER_HW_VERSION);
		venus_data->hw_ver_minor = (ver & 0x0FFF0000) >> 16;
		venus_data->hw_ver_major = (ver & 0xF0000000) >> 28;
		venus_data->hw_ver_checked = 1;
	}