static int modem_ramdump(int enable, const struct subsys_desc *subsys)
{
	struct modem_data *drv = subsys_to_drv(subsys);
	int ret;

	if (!enable)
		return 0;

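	/* Hold proxy votes (clocks/regulators) while the dump is taken. */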
	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
	if (ret)
		return ret;

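	/* Reload the MBA so the modem region can be accessed for the dump. */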
	ret = pil_mss_reset_load_mba(&drv->q6->desc);
	if (ret) {
		/* Drop the proxy votes taken above before bailing out. */
		pil_mss_remove_proxy_votes(&drv->q6->desc);
		return ret;
	}

	ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
	if (ret < 0)
		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);

	dma_free_coherent(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys);
	/* Clear the stale pointer so a later deinit does not double-free. */
	drv->q6->mba_virt = NULL;

	pil_mss_shutdown(&drv->q6->desc);
	pil_mss_remove_proxy_votes(&drv->q6->desc);
	return ret;
}
Example #2
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	DEFINE_DMA_ATTRS(attrs);

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL, &attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
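	/*
	 * Wait for the MBA to report successful header authentication or
	 * a negative error status.
	 */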
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);

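	/* On success return 0; otherwise fall through to the failure path. */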
	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
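The variant below differs mainly in allocation strategy: it uses plain dma_alloc_coherent() instead of dma_alloc_attrs() with DMA_ATTR_STRONGLY_ORDERED, adds an optional CONFIG_TRACE_MODEM_MEM_FAIL crash hook on allocation failure, and leaves MBA memory cleanup to the deinit path rather than freeing it on error here.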
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_coherent(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA(mdt)metadata buffer allocation failed\n");
#ifdef CONFIG_TRACE_MODEM_MEM_FAIL
		/*Need ramdump on exact alloc failure case for MODEM_mdt_AUTH 38K*/
                BUG_ON(1);
#endif
		return -ENOMEM;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_coherent(&drv->mba_mem_dev, size, mdata_virt, mdata_phys);

	if (ret) {
		modem_log_rmb_regs(drv->rmb_base);
		if (drv->q6)
			pil_mss_shutdown(pil);
	}
	return ret;
}
int pil_mss_deinit_image(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	int ret;

	ret = pil_mss_shutdown(pil);

	/*
	 * If an earlier failure prevented the MBA memory from being
	 * reclaimed, free it here.
	 */
	if (drv->q6->mba_virt) {
		dma_free_coherent(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
Example #5
int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;
	s32 status;

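	/*
	 * On the error path, notify the MBA of the PIL failure first so
	 * it unlocks the modem region before the subsystem is shut down.
	 */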
	if (err_path) {
		writel_relaxed(CMD_PILFAIL_NFY_MBA,
				drv->rmb_base + RMB_MBA_COMMAND);
		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_MBA_UNLOCKED || status < 0,
			POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
		if (ret)
			dev_err(pil->dev, "MBA region unlock timed out\n");
		else if (status < 0)
			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
						status);
	}

	ret = pil_mss_shutdown(pil);

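	/* Drop the AHB clock vote if one is still held from boot. */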
	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	/*
	 * If an earlier failure prevented the MBA memory from being
	 * reclaimed, free it here.
	 */
	if (drv->q6->mba_virt) {
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
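	/* Likewise free the dp buffer (debug policy image) if still mapped. */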
	if (drv->q6->dp_virt) {
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->dp_size,
				drv->q6->dp_virt, drv->q6->dp_phys,
				&drv->attrs_dma);
		drv->q6->dp_virt = NULL;
	}
	return ret;
}
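
All of these examples drive the same RMB register handshake: write a command to RMB_MBA_COMMAND, then poll RMB_MBA_STATUS until the MBA posts the expected status or a negative error code. A minimal sketch of that pattern, factored into a hypothetical helper (rmb_send_cmd_and_wait is not part of the driver; it only restates the inline pattern above, assuming the same readl_poll_timeout and POLL_INTERVAL_US used in the examples):

/*
 * Sketch only: a hypothetical helper that restates the RMB
 * command/poll pattern used inline above. Issue a command to the
 * MBA, then wait until the status register reports the expected
 * value or a negative error code.
 */
static int rmb_send_cmd_and_wait(void __iomem *rmb_base, u32 cmd,
				s32 expected, unsigned long timeout_us)
{
	s32 status;
	int ret;

	writel_relaxed(cmd, rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(rmb_base + RMB_MBA_STATUS, status,
			status == expected || status < 0,
			POLL_INTERVAL_US, timeout_us);
	if (ret)
		return ret;	/* poll timed out */
	return (status < 0) ? -EINVAL : 0;
}

With such a helper, the metadata step above would collapse to a single call: rmb_send_cmd_and_wait(drv->rmb_base, CMD_META_DATA_READY, STATUS_META_DATA_AUTH_SUCCESS, modem_auth_timeout_ms * 1000).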