/*
 * Function: map_sg_to_lli
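 *
 * Maps a scatter/gather list into a chain of AHB DMA linked list items
 * (LLIs) that transfer data between memory and the SATA DMA data
 * register, and returns the number of LLI entries filled in.
 *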
 * The Synopsys driver has a comment proposing that better performance is
 * possible by only enabling interrupts on the last item in the linked list.
 * However, that could be a problem if an error happens on one of the first
 * items: the transfer would halt, but no error interrupt would be raised.
 * For now this function therefore enables the interrupt (DMA_CTL_INT_EN)
 * on every linked list item.
 */
static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
			struct lli *lli, dma_addr_t dma_lli,
			void __iomem *dmadr_addr, int dir)
{
	int i, idx = 0;
	int fis_len = 0;
	dma_addr_t next_llp;
	int bl;

	dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
		" dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
		(u32)dmadr_addr);

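	/* Encode the default AHB burst length for the SRC/DST_MSIZE fields. */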
	bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);

	for (i = 0; i < num_elems; i++, sg++) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
			"=%d\n", __func__, i, addr, sg_len);

		while (sg_len) {
			if (idx >= SATA_DWC_DMAC_LLI_NUM) {
				/* The LLI table is not large enough. */
				dev_err(host_pvt.dwc_dev, "LLI table overrun "
					"(idx=%d)\n", idx);
				break;
			}
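			/*
			 * Clamp each chunk to the DMA controller's maximum
			 * block transfer size.
			 */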
			len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
				SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;

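			/*
			 * Keep a chunk from crossing a 64KB address
			 * boundary.
			 */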
			offset = addr & 0xffff;
			if ((offset + len) > 0x10000)
				len = 0x10000 - offset;

			/*
			 * Make sure a LLI block is not created that will span
			 * 8K max FIS boundary.  If the block spans such a FIS
			 * boundary, there is a chance that a DMA burst will
			 * cross that boundary -- this results in an error in
			 * the host controller.
			 */
			if (fis_len + len > 8192) {
				dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
					"%d(0x%x) len=%d(0x%x)\n", fis_len,
					 fis_len, len, len);
				len = 8192 - fis_len;
				fis_len = 0;
			} else {
				fis_len += len;
			}
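			/*
			 * A chunk that ends exactly on the 8K FIS boundary
			 * also restarts the FIS length counter.
			 */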
			if (fis_len == 8192)
				fis_len = 0;

			/*
			 * Set DMA addresses and lower half of control register
			 * based on direction.
			 */
			if (dir == DMA_FROM_DEVICE) {
				lli[idx].dar = cpu_to_le32(addr);
				lli[idx].sar = cpu_to_le32((u32)dmadr_addr);

				lli[idx].ctl.low = cpu_to_le32(
					DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
					DMA_CTL_SMS(0) |
					DMA_CTL_DMS(1) |
					DMA_CTL_SRC_MSIZE(bl) |
					DMA_CTL_DST_MSIZE(bl) |
					DMA_CTL_SINC_NOCHANGE |
					DMA_CTL_SRC_TRWID(2) |
					DMA_CTL_DST_TRWID(2) |
					DMA_CTL_INT_EN |
					DMA_CTL_LLP_SRCEN |
					DMA_CTL_LLP_DSTEN);
			} else {	/* DMA_TO_DEVICE */
				lli[idx].sar = cpu_to_le32(addr);
				lli[idx].dar = cpu_to_le32((u32)dmadr_addr);

				lli[idx].ctl.low = cpu_to_le32(
					DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
					DMA_CTL_SMS(1) |
					DMA_CTL_DMS(0) |
					DMA_CTL_SRC_MSIZE(bl) |
					DMA_CTL_DST_MSIZE(bl) |
					DMA_CTL_DINC_NOCHANGE |
					DMA_CTL_SRC_TRWID(2) |
					DMA_CTL_DST_TRWID(2) |
					DMA_CTL_INT_EN |
					DMA_CTL_LLP_SRCEN |
					DMA_CTL_LLP_DSTEN);
			}

			dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
				"0x%08x val: 0x%08x\n", __func__,
				len, DMA_CTL_BLK_TS(len / 4));

			/*
			 * Program the LLI CTL high register.  The block
			 * transfer size is in units of the 32-bit transfer
			 * width, hence len / 4.
			 */
			lli[idx].ctl.high = cpu_to_le32(DMA_CTL_BLK_TS(len / 4));

			/*
			 * Program the next pointer.  The next pointer must be
			 * the physical (DMA) address, not the virtual address.
			 */
			next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli)));

			/* The last 2 bits encode the list master select. */
			next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);

			lli[idx].llp = cpu_to_le32(next_llp);
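
			/* Move to the next LLI slot; advance within the sg element. */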
			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/*
	 * The last LLI's next pointer has to be zero, and the last control
	 * low register has to have LLP_SRC_EN and LLP_DST_EN (linked list
	 * pointer source and destination enable) set back to 0 (disabled).
	 * This is what tells the core that this is the last item in the
	 * linked list.
	 */
	if (idx) {
		lli[idx-1].llp = 0x00000000;
		lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;

		/* Flush cache to memory */
		dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
			       DMA_BIDIRECTIONAL);
	}

	return idx;
}
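
/*
 * For reference, a minimal sketch of the descriptor layout that the field
 * accesses above imply.  This is reconstructed from usage, not taken from
 * the driver: the name lli_sketch and the field comments are assumptions,
 * and the real struct lli may carry additional fields (e.g. status words).
 */
struct lli_sketch {
	u32 sar;		/* source address */
	u32 dar;		/* destination address */
	u32 llp;		/* next LLI (bus address; low bits = master select) */
	struct {
		u32 low;	/* TTFC, SMS/DMS, MSIZE, TRWID, INT_EN, LLP enables */
		u32 high;	/* block transfer size (DMA_CTL_BLK_TS) */
	} ctl;
};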