Example #1: tce_build_pSeriesLP(), single-TCE mapping via plpar_tce_put()
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                               long npages, unsigned long uaddr,
                               enum dma_data_direction direction,
                               struct dma_attrs *attrs)
{
    u64 rc = 0;
    u64 proto_tce, tce;
    u64 rpn;
    int ret = 0;
    long tcenum_start = tcenum, npages_start = npages;

    rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
    proto_tce = TCE_PCI_READ;
    if (direction != DMA_TO_DEVICE)
        proto_tce |= TCE_PCI_WRITE;

    while (npages--) {
        tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
        rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);

        if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
            ret = (int)rc;
            tce_free_pSeriesLP(tbl, tcenum_start,
                               (npages_start - (npages + 1)));
            break;
        }

        if (rc && printk_ratelimit()) {
            printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
            printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
            printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
            printk("\ttce val = 0x%llx\n", tce );
            show_stack(current, (unsigned long *)__get_SP());
        }

        tcenum++;
        rpn++;
    }
    return ret;
}
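
For reference, the TCE word built in the loop above is just the real page number of uaddr shifted into place, with the PCI read/write permission bits OR'd into the low bits. A minimal sketch of that composition, with the TCE_* constants assumed to match their usual asm/tce.h values (4K TCE pages, 40-bit RPN) and a hypothetical make_tce() helper:

#include <linux/types.h>

/* Sketch only: the constant values are assumptions, check asm/tce.h. */
#define TCE_SHIFT	12
#define TCE_RPN_SHIFT	12
#define TCE_RPN_MASK	0xfffffffffful	/* 40-bit real page number */
#define TCE_PCI_READ	0x1		/* read from PCI allowed */
#define TCE_PCI_WRITE	0x2		/* write from PCI allowed */

/* Hypothetical helper: compose one TCE word for the page holding abs_addr. */
static inline u64 make_tce(u64 abs_addr, bool writable)
{
	u64 tce = ((abs_addr >> TCE_SHIFT) & TCE_RPN_MASK) << TCE_RPN_SHIFT;

	tce |= TCE_PCI_READ;		/* reads are always permitted */
	if (writable)			/* i.e. direction != DMA_TO_DEVICE */
		tce |= TCE_PCI_WRITE;
	return tce;
}
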
Example #2: tce_freemulti_pSeriesLP(), bulk TCE clearing via plpar_tce_stuff() (older union tce_entry variant)
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;
	union tce_entry tce;

	tcenum <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

	tce.te_word = 0;

	rc = plpar_tce_stuff((u64)tbl->it_index,
			   (u64)tcenum << 12,
			   tce.te_word,
			   npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce val = 0x%lx\n", tce.te_word );
		show_stack(current, (unsigned long *)__get_SP());
	}
}
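
The two shifts at the top of tce_freemulti_pSeriesLP() convert kernel-page units into hardware TCE units. TCE_PAGE_FACTOR is assumed here to be PAGE_SHIFT - TCE_SHIFT, so with 64K kernel pages and 4K TCE pages each kernel-page index expands to 16 TCE slots. A small illustrative sketch (the helper name is hypothetical):

#include <asm/page.h>		/* PAGE_SHIFT */

#define TCE_SHIFT	12				/* 4K TCE pages */
#define TCE_PAGE_FACTOR	(PAGE_SHIFT - TCE_SHIFT)	/* e.g. 16 - 12 = 4 */

/* Scale a (tcenum, npages) pair from kernel pages to hardware TCEs. */
static inline void tce_scale_units(long *tcenum, long *npages)
{
	*tcenum <<= TCE_PAGE_FACTOR;	/* first hardware TCE slot */
	*npages <<= TCE_PAGE_FACTOR;	/* 2^TCE_PAGE_FACTOR TCEs per kernel page */
}
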
Example #3: tce_buildmulti_pSeriesLP(), batched mapping via plpar_tce_put_indirect() and a per-CPU scratch page
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                    long npages, unsigned long uaddr,
                                    enum dma_data_direction direction,
                                    struct dma_attrs *attrs)
{
    u64 rc = 0;
    u64 proto_tce;
    u64 *tcep;
    u64 rpn;
    long l, limit;
    long tcenum_start = tcenum, npages_start = npages;
    int ret = 0;

    if (npages == 1) {
        return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                   direction, attrs);
    }

    tcep = __get_cpu_var(tce_page);

    /* This is safe to do since interrupts are off when we're called
     * from iommu_alloc{,_sg}()
     */
    if (!tcep) {
        tcep = (u64 *)__get_free_page(GFP_ATOMIC);
        /* If allocation fails, fall back to the loop implementation */
        if (!tcep) {
            return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                       direction, attrs);
        }
        __get_cpu_var(tce_page) = tcep;
    }

    rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
    proto_tce = TCE_PCI_READ;
    if (direction != DMA_TO_DEVICE)
        proto_tce |= TCE_PCI_WRITE;

    /* We can map max one pageful of TCEs at a time */
    do {
        /*
         * Set up the page with TCE data, looping through and setting
         * the values.
         */
        limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

        for (l = 0; l < limit; l++) {
            tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
            rpn++;
        }

        rc = plpar_tce_put_indirect((u64)tbl->it_index,
                                    (u64)tcenum << 12,
                                    (u64)virt_to_abs(tcep),
                                    limit);

        npages -= limit;
        tcenum += limit;
    } while (npages > 0 && !rc);

    if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
        ret = (int)rc;
        tce_freemulti_pSeriesLP(tbl, tcenum_start,
                                (npages_start - (npages + limit)));
        return ret;
    }

    if (rc && printk_ratelimit()) {
        printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
        printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
        printk("\tnpages  = 0x%llx\n", (u64)npages);
        printk("\ttce[0] val = 0x%llx\n", tcep[0]);
        show_stack(current, (unsigned long *)__get_SP());
    }
    return ret;
}
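
The __get_cpu_var(tce_page) lookups above rely on a per-CPU pointer to a scratch page that holds the TCE list handed to plpar_tce_put_indirect(). A minimal sketch of the backing declaration, as it would sit near the top of the file:

#include <linux/percpu.h>
#include <linux/types.h>

/*
 * One scratch page per CPU for building indirect TCE lists.  Allocated
 * lazily with __get_free_page(GFP_ATOMIC) on first use (see
 * tce_buildmulti_pSeriesLP() above) and never freed.
 */
static DEFINE_PER_CPU(u64 *, tce_page);
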
Example #4: tce_buildmulti_pSeriesLP(), a second copy of the batched mapping path from Example #3
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;

	if (npages == 1) {
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
		                           direction, attrs);
	}

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (u64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					    direction, attrs);
		}
		__get_cpu_var(tce_page) = tcep;
	}

	rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	
	/* We can map max one pageful of TCEs at a time */
	do {
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
		                        (npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		show_stack(current, (unsigned long *)__get_SP());
	}
	return ret;
}
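
The hypervisor calls used throughout these examples are thin wrappers around plpar_hcall_norets(). A rough sketch of those wrappers, assuming the H_PUT_TCE, H_PUT_TCE_INDIRECT and H_STUFF_TCE tokens from asm/hvcall.h; the second argument is an IO bus address, which is why the callers pass (u64)tcenum << 12 (TCE index times the 4K TCE page size):

#include <asm/hvcall.h>

/* Write a single TCE at IO bus address ioba in table liobn. */
static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
				 unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

/* Write count TCEs taken from the physical page at page (at most 512). */
static inline long plpar_tce_put_indirect(unsigned long liobn,
					  unsigned long ioba,
					  unsigned long page,
					  unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

/* Fill count consecutive TCEs with the same value (used here to clear them). */
static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
				   unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}
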
Example #5: tce_buildmulti_pSeriesLP(), older void-returning variant built on union tce_entry and TCE_PAGE_FACTOR
static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction)
{
	u64 rc;
	union tce_entry tce, *tcep;
	long l, limit;

	if (TCE_PAGE_FACTOR == 0 && npages == 1)
		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
					   direction);

	tcep = __get_cpu_var(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (void *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep)
			return tce_build_pSeriesLP(tbl, tcenum, npages,
						   uaddr, direction);
		__get_cpu_var(tce_page) = tcep;
	}

	tcenum <<= TCE_PAGE_FACTOR;
	npages <<= TCE_PAGE_FACTOR;

	tce.te_word = 0;
	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
	tce.te_rdwr = 1;
	if (direction != DMA_TO_DEVICE)
		tce.te_pciwr = 1;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/sizeof(union tce_entry));

		for (l = 0; l < limit; l++) {
			tcep[l] = tce;
			tce.te_rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)virt_to_abs(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%lx\n", tcep[0].te_word);
		show_stack(current, (unsigned long *)__get_SP());
	}
}
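
Example #5 predates the plain-u64 TCE representation and builds its entries through a union tce_entry instead. The historical layout is not reproduced here; purely as an illustration of how te_rpn, te_pciwr and te_rdwr line up with the rpn << TCE_RPN_SHIFT | TCE_PCI_WRITE | TCE_PCI_READ encoding of the other examples, a hypothetical big-endian layout consistent with that usage would be:

/*
 * Hypothetical layout, not the historical asm-ppc64/tce.h definition:
 * field widths and the reserved gaps are assumptions.  With big-endian
 * (MSB-first) bitfield allocation this puts te_rpn at bits 51..12,
 * te_pciwr at bit 1 and te_rdwr at bit 0.
 */
union tce_entry {
	unsigned long te_word;			/* the raw 64-bit TCE */
	struct {				/* anonymous, so tce.te_rpn works */
		unsigned long te_rsvd_hi : 12;
		unsigned long te_rpn     : 40;	/* real page number */
		unsigned long te_rsvd_lo : 10;
		unsigned long te_pciwr   : 1;	/* write from PCI allowed */
		unsigned long te_rdwr    : 1;	/* read from PCI allowed */
	};
};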