Example 1
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
	struct device *dev = pdata->dev;

	/* Obtain the system clock setting */
	pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
	if (IS_ERR(pdata->sysclk)) {
		dev_err(dev, "dma devm_clk_get failed\n");
		return PTR_ERR(pdata->sysclk);
	}
	pdata->sysclk_rate = clk_get_rate(pdata->sysclk);

	/* Obtain the PTP clock setting */
	pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
	if (IS_ERR(pdata->ptpclk)) {
		dev_err(dev, "ptp devm_clk_get failed\n");
		return PTR_ERR(pdata->ptpclk);
	}
	pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);

	/* Retrieve the device cache coherency value */
	pdata->coherent = of_dma_is_coherent(dev->of_node);

	return 0;
}
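
For context, a minimal sketch of how the coherency flag recorded above might be consumed when picking AXI cache attributes. The xgbe_init_dma_attrs() helper and the XGBE_DMA_*CACHE constants are illustrative assumptions, not code from this driver.

/*
 * Hedged sketch: choose DMA cache attributes from the coherency flag
 * that xgbe_of_support() recorded. The constants are assumed names.
 */
static void xgbe_init_dma_attrs(struct xgbe_prv_data *pdata)
{
	if (pdata->coherent) {
		/* Coherent interconnect: cacheable, shareable attributes */
		pdata->arcache = XGBE_DMA_OS_ARCACHE;	/* assumed constant */
		pdata->awcache = XGBE_DMA_OS_AWCACHE;	/* assumed constant */
	} else {
		/* Non-coherent: rely on explicit CPU cache maintenance */
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;	/* assumed constant */
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;	/* assumed constant */
	}
}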
Example 2
bool device_dma_is_coherent(struct device *dev)
{
	bool coherent = false;

	/* Prefer the DT "dma-coherent" property; otherwise ask ACPI */
	if (IS_ENABLED(CONFIG_OF) && dev->of_node)
		coherent = of_dma_is_coherent(dev->of_node);
	else
		acpi_check_dma(ACPI_COMPANION(dev), &coherent);

	return coherent;
}
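
A hedged usage sketch of the helper above from a platform driver's probe path; struct my_dev and my_dev_probe() are illustrative names, not a real API.

/* Illustrative private data; not a real driver structure */
struct my_dev {
	bool coherent;
};

static int my_dev_probe(struct platform_device *pdev)
{
	struct my_dev *md;

	md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;

	/* Record once whether DMA is cache-coherent for this device */
	md->coherent = device_dma_is_coherent(&pdev->dev);
	platform_set_drvdata(pdev, md);

	return 0;
}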
Example 3
static int dwc3_config_soc_bus(struct dwc3 *dwc)
{
	int ret;

	/*
	 * Check if CCI is enabled for USB. of_dma_is_coherent() returns
	 * true if the node has the 'dma-coherent' property and false
	 * otherwise.
	 */
	if (of_dma_is_coherent(dwc->dev->of_node)) {
		u32 reg;

		reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);
		reg |= DWC3_GSBUSCFG0_DATRDREQINFO |
			DWC3_GSBUSCFG0_DESRDREQINFO |
			DWC3_GSBUSCFG0_DATWRREQINFO |
			DWC3_GSBUSCFG0_DESWRREQINFO;
		dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
	}

	/*
	 * Route the USB DMA traffic through the CCI path instead of
	 * reaching DDR directly. This traffic routing is needed to make
	 * the SMMU and CCI work with USB DMA.
	 */
	if (of_dma_is_coherent(dwc->dev->of_node) || dwc->dev->iommu_group) {
		ret = dwc3_enable_hw_coherency(dwc->dev);
		if (ret)
			return ret;
	}

	/*
	 * Send struct dwc3 to dwc3-of-simple for configuring VBUS
	 * during suspend/resume.
	 */
	dwc3_set_simple_data(dwc);

	return 0;
}
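
The routine above queries of_dma_is_coherent() twice. A small readability sketch (an illustrative refactor, not the driver's actual code) caches the result once:

static int dwc3_config_soc_bus_cached(struct dwc3 *dwc)
{
	bool coherent = of_dma_is_coherent(dwc->dev->of_node);
	int ret;

	if (coherent) {
		u32 reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG0);

		reg |= DWC3_GSBUSCFG0_DATRDREQINFO |
		       DWC3_GSBUSCFG0_DESRDREQINFO |
		       DWC3_GSBUSCFG0_DATWRREQINFO |
		       DWC3_GSBUSCFG0_DESWRREQINFO;
		dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, reg);
	}

	/* CCI/SMMU routing, as in the original */
	if (coherent || dwc->dev->iommu_group) {
		ret = dwc3_enable_hw_coherency(dwc->dev);
		if (ret)
			return ret;
	}

	dwc3_set_simple_data(dwc);

	return 0;
}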
Example 4
enum dev_dma_attr device_get_dma_attr(struct device *dev)
{
	enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED;

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		if (of_dma_is_coherent(dev->of_node))
			attr = DEV_DMA_COHERENT;
		else
			attr = DEV_DMA_NON_COHERENT;
	} else {
		attr = acpi_get_dma_attr(ACPI_COMPANION(dev));
	}

	return attr;
}
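
A hedged caller sketch for the function above: reject devices whose DMA capability is unknown and record coherency otherwise. my_check_dma() is an illustrative helper, not kernel API.

static int my_check_dma(struct device *dev, bool *coherent)
{
	enum dev_dma_attr attr = device_get_dma_attr(dev);

	/* DEV_DMA_NOT_SUPPORTED means neither DT nor ACPI described DMA */
	if (attr == DEV_DMA_NOT_SUPPORTED)
		return -ENODEV;

	*coherent = (attr == DEV_DMA_COHERENT);

	return 0;
}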
Example 5
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 signature_val;
	u64 dma_mask;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	new_drvdata->clk = of_clk_get(np, 0);
	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base))
		return PTR_ERR(new_drvdata->cc_base);

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	new_drvdata->irq = platform_get_irq(plat_dev, 0);
	if (new_drvdata->irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return new_drvdata->irq;
	}

	rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
			      IRQF_SHARED, "arm_cc7x", new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n",
			new_drvdata->irq);
		return rc;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);

	init_completion(&new_drvdata->hw_queue_avail);

	if (!plat_dev->dev.dma_mask)
		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;

	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&plat_dev->dev, dma_mask)) {
			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=%par\n", &dma_mask);
		return rc;
	}

	rc = cc_clk_on(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed to enable clock");
		return rc;
	}

	/* Verify correct mapping */
	signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
	if (signature_val != CC_DEV_SIGNATURE) {
		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
			signature_val, (u32)CC_DEV_SIGNATURE);
		rc = -EINVAL;
		goto post_clk_err;
	}
	dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
		 CC_DEV_NAME_STR,
		 cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
		 DRV_MODULE_VERSION);

	rc = init_cc_regs(new_drvdata, true);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_clk_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
		goto post_debugfs_err;
	}
	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
		rc = -ENOMEM;
		goto post_sram_mgr_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_sram_mgr_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	rc = cc_pm_init(new_drvdata);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_init failed\n");
		goto post_buf_mgr_err;
	}

	rc = cc_ivgen_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_ivgen_init failed\n");
		goto post_power_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_ivgen_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS test passed, so let TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_ivgen_err:
	cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
	cc_pm_fini(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_sram_mgr_err:
	cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_clk_err:
	cc_clk_off(new_drvdata);
	return rc;
}
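
A hedged sketch of how the coherency flag captured near the top of this routine (new_drvdata->coherent) might be consulted on a data path; cc_sync_for_device() is an illustrative helper, not part of the driver above.

static void cc_sync_for_device(struct device *dev,
			       struct cc_drvdata *drvdata,
			       dma_addr_t dma_addr, size_t len)
{
	/*
	 * On non-coherent platforms the CPU caches must be made visible
	 * to the device before it reads the buffer; coherent platforms
	 * can skip the explicit maintenance.
	 */
	if (!drvdata->coherent)
		dma_sync_single_for_device(dev, dma_addr, len, DMA_TO_DEVICE);
}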