static void mmci_dma_setup(struct mmci_host *host)
{
	const char *rxname, *txname;

	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (host->dma_rx_channel && !host->dma_tx_channel)
		host->dma_tx_channel = host->dma_rx_channel;

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	/* Let the variant-specific hook override or extend the setup */
	if (host->ops && host->ops->dma_setup)
		host->ops->dma_setup(host);
}
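/*
 * Because the RX channel may be reused as the TX channel above, the matching
 * teardown must take care not to release the same channel twice. Below is a
 * minimal sketch of such a release helper; it mirrors the approach taken by
 * the upstream driver, but the exact details may differ between kernel
 * versions, so treat it as illustrative rather than verbatim.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	/* Only release TX if it is a distinct channel from RX */
	if (host->dma_tx_channel &&
	    host->dma_tx_channel != host->dma_rx_channel)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}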
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to the
			 *   previous allocation
			 * - the merged segment would exceed max_seg_size
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n",
				    outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}
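/*
 * For reference, a minimal sketch of the matching unmap walk, which consumes
 * the zero dma_length terminator written above when the output list is
 * shorter than the input list. This is a sketch of the technique under the
 * name iommu_unmap_sg_sketch(), not a verbatim copy of the kernel's unmap
 * path; __iommu_free() is assumed to return the IOMMU pages backing one
 * mapped segment.
 */
static void iommu_unmap_sg_sketch(struct iommu_table *tbl,
				  struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg = sglist;

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		/* A zero length marks the end of the mapped segments */
		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}
}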
static int mxs_mmc_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_mmc_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct mxs_mmc_host *host;
	struct mmc_host *mmc;
	struct resource *iores;
	int ret = 0, irq_err;
	struct regulator *reg_vmmc;
	struct mxs_ssp *ssp;

	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	ssp = &host->ssp;
	ssp->dev = &pdev->dev;
	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ssp->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(ssp->base)) {
		ret = PTR_ERR(ssp->base);
		goto out_mmc_free;
	}

	ssp->devid = (enum mxs_ssp_id) of_id->data;

	host->mmc = mmc;
	host->sdio_irq_en = 0;

	reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
	if (!IS_ERR(reg_vmmc)) {
		ret = regulator_enable(reg_vmmc);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable vmmc regulator: %d\n", ret);
			goto out_mmc_free;
		}
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ssp->clk)) {
		ret = PTR_ERR(ssp->clk);
		goto out_mmc_free;
	}
	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_mmc_free;

	ret = mxs_mmc_reset(host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret);
		goto out_clk_disable;
	}

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(mmc_dev(host->mmc),
			"%s: failed to request dma\n", __func__);
		ret = -ENODEV;
		goto out_clk_disable;
	}

	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

	mmc->f_min = 400000;
	mmc->f_max = 288000000;

	/* The DMA channel is already held here, so unwind through
	 * out_free_dma rather than out_clk_disable to avoid leaking it.
	 */
	ret = mmc_of_parse(mmc);
	if (ret)
		goto out_free_dma;

	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_segs = 52;
	mmc->max_blk_size = 1 << 0xf;
	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff;
	mmc->max_req_size = (ssp_is_old(ssp)) ? 0xffff : 0xffffffff;
	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev);

	platform_set_drvdata(pdev, mmc);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
			       dev_name(&pdev->dev), host);
	if (ret)
		goto out_free_dma;

	spin_lock_init(&host->lock);

	ret = mmc_add_host(mmc);
	if (ret)
		goto out_free_dma;

	dev_info(mmc_dev(host->mmc), "initialized\n");

	return 0;

out_free_dma:
	dma_release_channel(ssp->dmach);
out_clk_disable:
	clk_disable_unprepare(ssp->clk);
out_mmc_free:
	mmc_free_host(mmc);
	return ret;
}
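/*
 * The probe's error labels above unwind in reverse order of acquisition; the
 * removal path must do the same for everything probe leaves held. Below is a
 * minimal sketch of such a remove callback, assuming the devm-managed
 * resources (IRQ, regulator, ioremap) are released by the driver core and
 * only the manually acquired host, DMA channel, and clock need explicit
 * teardown. Exact details may vary between kernel versions.
 */
static int mxs_mmc_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct mxs_mmc_host *host = mmc_priv(mmc);
	struct mxs_ssp *ssp = &host->ssp;

	mmc_remove_host(mmc);

	if (ssp->dmach)
		dma_release_channel(ssp->dmach);

	clk_disable_unprepare(ssp->clk);

	mmc_free_host(mmc);

	return 0;
}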