static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
				   size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	s32 status;
	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/*
	 * A zero length counter means this is the first blob of the image:
	 * hand the MBA the start address and begin authentication.
	 */
	if (img_length == 0) {
		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
	}
	/* Increment length counter */
	img_length += size;
	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
	if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d\n", status);
		modem_log_rmb_regs(drv->rmb_base);
		return -EINVAL;
	}

	return 0;
}
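
The authentication helpers below poll the RMB status register with readl_poll_timeout(). A sketch of that macro's contract (in mainline it lives in <linux/iopoll.h>; older msm trees carry an equivalent local definition):

/*
 * ret = readl_poll_timeout(addr, val, cond, delay_us, timeout_us);
 *
 * Re-reads the register at addr into val every delay_us until cond
 * evaluates true or timeout_us elapses. Returns 0 once cond holds,
 * -ETIMEDOUT otherwise; val keeps the last value read either way.
 */
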
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	int ret;
	s32 status;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_AUTH_COMPLETE || status < 0,
			50, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6 && drv->q6->mba_virt) {
		/* Reclaim MBA memory. */
		dma_free_coherent(&drv->mba_mem_dev, drv->q6->mba_size,
					drv->q6->mba_virt, drv->q6->mba_phys);
		drv->q6->mba_virt = NULL;
	}

	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
	return ret;
}
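
For context, these callbacks are normally wired into pil_reset_ops tables in pil-msa.c. A minimal sketch, assuming the member names used by the msm PIL framework (exact fields vary between trees; proxy-vote and mem-setup hooks omitted):

/* Self-authenticating modem image, driven by the MBA: */
struct pil_reset_ops pil_msa_mss_ops_selfauth = {
	.init_image	= pil_msa_auth_modem_mdt,  /* mdt header auth */
	.verify_blob	= pil_msa_mba_verify_blob, /* per-segment update */
	.auth_and_reset	= pil_msa_mba_auth,	   /* final image auth */
	.shutdown	= pil_mss_shutdown,
};

/* Booting the MBA itself goes through pil_mss_reset() below: */
struct pil_reset_ops pil_msa_mss_ops = {
	.auth_and_reset	= pil_mss_reset,
	.shutdown	= pil_mss_shutdown,
};
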
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	DEFINE_DMA_ATTRS(attrs);

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL, &attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
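
The variant above uses the pre-4.8 DMA attributes API to get a strongly ordered mapping of the metadata buffer. For comparison, the equivalent calls on both sides of that API change, assuming a tree that defines the msm-specific DMA_ATTR_STRONGLY_ORDERED attribute:

	void *buf;
	dma_addr_t phys;

	/* pre-4.8: attributes carried in a struct dma_attrs */
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	buf = dma_alloc_attrs(dev, size, &phys, GFP_KERNEL, &attrs);
	dma_free_attrs(dev, size, buf, phys, &attrs);

	/* 4.8 and later: attrs is a plain unsigned long bitmask */
	buf = dma_alloc_attrs(dev, size, &phys, GFP_KERNEL,
			      DMA_ATTR_STRONGLY_ORDERED);
	dma_free_attrs(dev, size, buf, phys, DMA_ATTR_STRONGLY_ORDERED);
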
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_coherent(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA(mdt)metadata buffer allocation failed\n");
#ifdef CONFIG_TRACE_MODEM_MEM_FAIL
		/*Need ramdump on exact alloc failure case for MODEM_mdt_AUTH 38K*/
                BUG_ON(1);
#endif
		return -ENOMEM;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_coherent(&drv->mba_mem_dev, size, mdata_virt, mdata_phys);

	if (ret) {
		modem_log_rmb_regs(drv->rmb_base);
		if (drv->q6)
			pil_mss_shutdown(pil);
	}
	return ret;
}
Example #5
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret;
	s32 status;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_AUTH_COMPLETE || status < 0,
			50, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6) {
		if (drv->q6->mba_virt) {
			/* Reclaim MBA memory. */
			dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
					drv->q6->mba_virt, drv->q6->mba_phys,
					&drv->attrs_dma);
			drv->q6->mba_virt = NULL;
		}

		if (drv->q6->dp_virt) {
			/* Reclaim Modem DP memory. */
			dma_free_attrs(&drv->mba_mem_dev, drv->q6->dp_size,
					drv->q6->dp_virt, drv->q6->dp_phys,
					&drv->attrs_dma);
			drv->q6->dp_virt = NULL;
		}
	}
	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
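	/* Drop the AHB clock vote, if one is held. */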
	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	return ret;
}
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	int ret;

	if (drv->mba_phys)
		start_addr = drv->mba_phys;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	/* Deassert reset to subsystem and wait for propagation */
	ret = pil_mss_restart_reg(drv, 0);
	if (ret)
		goto err_restart;

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

	/* Program Image Address */
	if (drv->self_auth) {
		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
		/*
		 * Ensure write to RMB base occurs before reset
		 * is released.
		 */
		mb();
	} else {
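		/*
		 * No self-auth: boot straight from the entry point by
		 * programming the QDSP6 reset exception-vector base with
		 * the shifted entry address.
		 */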
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (drv->self_auth) {
		ret = pil_msa_wait_for_mba_ready(drv);
		if (ret)
			goto err_q6v5_reset;
	}

	pr_info("pil: MBA boot done\n");
	drv->is_booted = true;

	return 0;

err_q6v5_reset:
	modem_log_rmb_regs(drv->rmb_base);
	pil_mss_disable_clks(drv);
err_clks:
	pil_mss_restart_reg(drv, 1);
err_restart:
	pil_mss_power_down(drv);
err_power:
	return ret;
}
Example #7
static int pil_mss_reset(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	phys_addr_t start_addr = pil_get_entry_addr(pil);
	int ret;

	if (drv->mba_phys)
		start_addr = drv->mba_phys;

	/*
	 * Bring subsystem out of reset and enable required
	 * regulators and clocks.
	 */
	ret = pil_mss_power_up(drv);
	if (ret)
		goto err_power;

	/* Deassert reset to subsystem and wait for propagation */
	ret = pil_mss_restart_reg(drv, 0);
	if (ret)
		goto err_restart;

	ret = pil_mss_enable_clks(drv);
	if (ret)
		goto err_clks;

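	/* Optionally program the QDSP6 debug configuration word. */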
	if (modem_dbg_cfg)
		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);

	/* Program Image Address */
	if (drv->self_auth) {
		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
		/*
		 * Ensure write to RMB base occurs before reset
		 * is released.
		 */
		mb();
	} else {
		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
				drv->reg_base + QDSP6SS_RST_EVB);
	}

	/* Program the DP (debug policy) image address, if one was loaded */
	if (drv->dp_virt) {
		writel_relaxed(drv->dp_phys, drv->rmb_base +
			       RMB_PMI_CODE_START);
		writel_relaxed(drv->dp_size, drv->rmb_base +
			       RMB_PMI_CODE_LENGTH);
	} else {
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
	}
	/* Make sure RMB regs are written before bringing modem out of reset */
	mb();

	ret = pil_q6v5_reset(pil);
	if (ret)
		goto err_q6v5_reset;

	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
	if (drv->self_auth) {
		ret = pil_msa_wait_for_mba_ready(drv);
		if (ret)
			goto err_q6v5_reset;
	}

	dev_info(pil->dev, "MBA boot done\n");
	drv->is_booted = true;

	return 0;

err_q6v5_reset:
	modem_log_rmb_regs(drv->rmb_base);
	pil_mss_disable_clks(drv);
	if (drv->ahb_clk_vote)
		clk_disable_unprepare(drv->ahb_clk);
err_clks:
	pil_mss_restart_reg(drv, 1);
err_restart:
	pil_mss_power_down(drv);
err_power:
	return ret;
}
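
Taken together, pil_mss_reset() boots the Q6 and the MBA, after which the PIL core drives the authentication callbacks while loading the main modem image. A sketch of the typical sequence, assuming the generic msm PIL boot flow:

/*
 * 1. pil_mss_reset()                   - power the Q6, program RMB/EVB,
 *                                        release reset, wait for the MBA
 * 2. init_image  -> auth_modem_mdt()   - authenticate the mdt headers
 * 3. verify_blob -> mba_verify_blob()  - grow RMB_PMI_CODE_LENGTH as each
 *                                        segment is loaded
 * 4. auth_and_reset -> mba_auth()      - poll for STATUS_AUTH_COMPLETE and
 *                                        reclaim the MBA/DP buffers
 */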