Example #1
static struct nvos_pagemap *nv_alloc_pages(unsigned int count,
        pgprot_t prot, bool contiguous, int create_mapping)
{
    struct nvos_pagemap *pm;
    size_t size;
    unsigned int i = 0;

    size = sizeof(struct nvos_pagemap) + sizeof(struct page *) * (count - 1);
    pm = kzalloc(size, GFP_KERNEL);
    if (!pm)
        return NULL;

    if (count == 1)
        contiguous = true;

    if (contiguous) {
        size_t order = get_order(count << PAGE_SHIFT);
        struct page *compound_page;
        compound_page = alloc_pages(nv_gfp_pool, order);
        if (!compound_page)
            goto fail;

        split_page(compound_page, order);
        for (i = 0; i < count; i++)
            pm->pages[i] = nth_page(compound_page, i);

        /* free the surplus tail of the 2^order allocation */
        for (; i < (1 << order); i++)
            __free_page(nth_page(compound_page, i));
        i = count;
    } else {
        for (i = 0; i < count; i++) {
            pm->pages[i] = alloc_page(nv_gfp_pool);
            if (!pm->pages[i])
                goto fail;
        }
    }

    if (create_mapping) {
        /* since the linear kernel mapping uses sections and super-
         * sections rather than PTEs, it's not possible to overwrite
         * it with the correct caching attributes, so use a local
         * mapping */
        pm->addr = vm_map_ram(pm->pages, count, -1, prot);
        if (!pm->addr) {
            pr_err("nv_alloc_pages fail to vmap contiguous area\n");
            goto fail;
        }
    }

    pm->nr_pages = count;
    for (i = 0; i < count; i++) {
        SetPageReserved(pm->pages[i]);
        pagemap_flush_page(pm->pages[i]);
    }

    return pm;

fail:
    while (i)
        __free_page(pm->pages[--i]);
    kfree(pm);
    return NULL;
}
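
The interesting part of Example #1 is the trim idiom in the contiguous branch: over-allocate a power-of-two compound block, split_page() it into independent order-0 pages, and give the surplus tail back. A minimal sketch of just that idiom, assuming plain GFP_KERNEL in place of the driver's nv_gfp_pool (the kernel's own alloc_pages_exact() follows the same pattern); the helper name is ours:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: return exactly `count` physically contiguous
 * pages, freeing the tail of the rounded-up power-of-two allocation. */
static struct page *alloc_exact_contig(unsigned int count)
{
	unsigned int order = get_order(count << PAGE_SHIFT);
	struct page *page = alloc_pages(GFP_KERNEL, order);
	unsigned int i;

	if (!page)
		return NULL;

	/* Turn one order-N compound allocation into 2^N independent
	 * order-0 pages, each with its own reference count. */
	split_page(page, order);

	/* Free the pages beyond `count`; the survivors stay contiguous. */
	for (i = count; i < (1U << order); i++)
		__free_page(nth_page(page, i));

	return page;
}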
Example #2
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	unsigned long flags;
	int i;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	pages = alloc_pages(gfp, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_populated(chunk, 0, nr_pages, false);
	spin_unlock_irqrestore(&pcpu_lock, flags);

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}
Example #3
static unsigned int process_info(struct page_info *info,
				 struct scatterlist *sg,
				 struct scatterlist *sg_sync,
				 struct pages_mem *data, unsigned int i)
{
	struct page *page = info->page;
	unsigned int j;

	if (sg_sync) {
		sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
		sg_dma_address(sg_sync) = page_to_phys(page);
	}
	sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t
	 * that is valid for the targeted device, but this works
	 * on the currently targeted hardware.
	 */
	sg_dma_address(sg) = page_to_phys(page);
	if (data) {
		for (j = 0; j < (1 << info->order); ++j)
			data->pages[i++] = nth_page(page, j);
	}
	list_del(&info->list);
	kfree(info);
	return i;
}
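
The 'This is not correct' comment above is worth acting on: page_to_phys() yields a CPU physical address, which is not a valid bus address for an arbitrary device. A hedged sketch of the portable alternative, assuming a real struct device is available (the helper name is ours): let dma_map_sg() establish the mapping and fill in sg_dma_address()/sg_dma_len() itself.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map an already-built SG list for `dev`.
 * Returns the number of DMA segments (possibly fewer than `nents`
 * if an IOMMU merged entries), or -ENOMEM on failure. */
static int map_sg_for_device(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction dir)
{
	int mapped = dma_map_sg(dev, sgl, nents, dir);

	if (!mapped)
		return -ENOMEM;

	/* sg_dma_address()/sg_dma_len() are now valid for `dev`;
	 * release with dma_unmap_sg(dev, sgl, nents, dir). */
	return mapped;
}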
Example #4
/*
 * In PIO mode we have to map each page separately, using kmap(), so
 * physically adjacent pages may end up at non-adjacent virtual
 * addresses. That's why we have to use a bounce buffer for blocks
 * crossing page boundaries. Such blocks have been observed with an
 * SDIO WiFi card (b43 driver).
 */
static void usdhi6_blk_bounce(struct usdhi6_host *host,
			      struct scatterlist *sg)
{
	struct mmc_data *data = host->mrq->data;
	size_t blk_head = host->head_len;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
		__func__, host->mrq->cmd->opcode, data->sg_len,
		data->blksz, data->blocks, sg->offset);

	host->head_pg.page	= host->pg.page;
	host->head_pg.mapped	= host->pg.mapped;
	host->pg.page		= nth_page(host->pg.page, 1);
	host->pg.mapped		= kmap(host->pg.page);

	host->blk_page = host->bounce_buf;
	host->offset = 0;

	if (data->flags & MMC_DATA_READ)
		return;

	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
	       blk_head);
	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
	       data->blksz - blk_head);
}
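
The premise of the comment above is easy to check: under highmem, kmap() gives no guarantee that physically adjacent pages receive adjacent virtual addresses, which is exactly why a block straddling a page boundary cannot be read with a single memcpy(). An illustrative-only sketch; the helper name is ours:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Illustrative only: true if two physically adjacent pages happen to
 * be virtually adjacent after kmap(); on highmem systems this is
 * typically false, hence the bounce buffer above. */
static bool kmap_pages_adjacent(struct page *page)
{
	void *a = kmap(page);
	void *b = kmap(nth_page(page, 1));
	bool adjacent = (b == a + PAGE_SIZE);

	kunmap(nth_page(page, 1));
	kunmap(page);
	return adjacent;
}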
Example #5
static inline void
dma_sync_high(struct scatterlist *sg, enum dma_data_direction direction)
{
	int i;
	unsigned int nr_pages;
	struct page *tmp_page;
	unsigned int offset;
	unsigned int length, length_remain;
	unsigned long addr;

	nr_pages = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	length_remain = sg->length;

	for (i = 0, offset = sg->offset; i < nr_pages; i++, offset = 0) {
		tmp_page = nth_page(sg->page, i);

		length = (length_remain > (PAGE_SIZE - offset)) ?
			(PAGE_SIZE - offset) : length_remain;

		addr = (unsigned long)kmap(tmp_page);
		__dma_sync(addr + offset, length, direction);
		kunmap(tmp_page);
		length_remain -= length;
	}
}
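
dma_sync_high() handles a single scatterlist entry, so a caller walks the whole list. A minimal same-file sketch of such a caller, assuming the pre-chained-scatterlist era this code comes from (note the sg->page field above), where an SG list is a plain array; the helper name is ours:

/* Hypothetical caller: sync every entry of an array-style SG list. */
static void dma_sync_sg_high(struct scatterlist *sg, int nents,
			     enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; i++)
		dma_sync_high(&sg[i], direction);
}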
Example #6
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
	struct pcpu_chunk *chunk;
	struct page *pages;
	int i;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));
	if (!pages) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	for (i = 0; i < nr_pages; i++)
		pcpu_set_page_chunk(nth_page(pages, i), chunk);

	chunk->data = pages;
	chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_populated(chunk, 0, nr_pages);
	spin_unlock_irq(&pcpu_lock);

	return chunk;
}
Example #7
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	struct scatterlist *sg;
	size_t buf_off = 0;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	for_each_sg(sgl, sg, nents, i) {
		struct page *page;
		int n = 0;
		unsigned int sg_off = sg->offset;
		unsigned int sg_copy = sg->length;

		if (sg_copy > buflen)
			sg_copy = buflen;
		buflen -= sg_copy;

		while (sg_copy > 0) {
			unsigned int page_copy;
			void *p;

			page_copy = PAGE_SIZE - sg_off;
			if (page_copy > sg_copy)
				page_copy = sg_copy;

			page = nth_page(sg_page(sg), n);
			p = kmap_atomic(page, KM_BIO_SRC_IRQ);

			if (to_buffer)
				memcpy(buf + buf_off, p + sg_off, page_copy);
			else {
				memcpy(p + sg_off, buf + buf_off, page_copy);
				flush_kernel_dcache_page(page);
			}

			kunmap_atomic(p, KM_BIO_SRC_IRQ);

			buf_off += page_copy;
			sg_off += page_copy;
			if (sg_off == PAGE_SIZE) {
				sg_off = 0;
				n++;
			}
			sg_copy -= page_copy;
		}

		if (!buflen)
			break;
	}

	local_irq_restore(flags);

	return buf_off;
}
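
A hedged usage sketch of sg_copy_buffer(): build a two-entry SG list over two already-allocated pages, fill it from a linear buffer (to_buffer == 0), then read it back (to_buffer != 0). The function is static above, so treat this as same-file usage; the helper name and page parameters are ours:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static void sg_copy_demo(struct page *pg0, struct page *pg1, void *buf)
{
	struct scatterlist sgl[2];

	sg_init_table(sgl, 2);
	sg_set_page(&sgl[0], pg0, PAGE_SIZE, 0);
	sg_set_page(&sgl[1], pg1, PAGE_SIZE, 0);

	/* linear buffer -> SG list */
	sg_copy_buffer(sgl, 2, buf, 2 * PAGE_SIZE, 0);
	/* SG list -> linear buffer */
	sg_copy_buffer(sgl, 2, buf, 2 * PAGE_SIZE, 1);
}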
Example #8
/**
 * vtl_sg_copy_user - Copy data between a user-space buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The user-space buffer to copy to or from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t vtl_sg_copy_user(struct scatterlist *sgl, unsigned int nents,
				__user void *buf, size_t buflen, int to_buffer)
{
	struct scatterlist *sg;
	size_t buf_off = 0;
	int i;
	int ret;

	for_each_sg(sgl, sg, nents, i) {
		struct page *page;
		int n = 0;
		unsigned int sg_off = sg->offset;
		unsigned int sg_copy = sg->length;

		if (sg_copy > buflen)
			sg_copy = buflen;
		buflen -= sg_copy;

		while (sg_copy > 0) {
			unsigned int page_copy;
			void *p;

			page_copy = PAGE_SIZE - sg_off;
			if (page_copy > sg_copy)
				page_copy = sg_copy;

			page = nth_page(sg_page(sg), n);
			p = kmap_atomic(page, KM_BIO_SRC_IRQ);

			if (to_buffer)
				ret = copy_to_user(buf + buf_off, p + sg_off, page_copy);
			else {
				ret = copy_from_user(p + sg_off, buf + buf_off, page_copy);
				flush_kernel_dcache_page(page);
			}

			kunmap_atomic(p, KM_BIO_SRC_IRQ);

			/* A fault on the user buffer: stop here and
			 * report the bytes fully copied so far. */
			if (ret)
				return buf_off;

			buf_off += page_copy;
			sg_off += page_copy;
			if (sg_off == PAGE_SIZE) {
				sg_off = 0;
				n++;
			}
			sg_copy -= page_copy;
		}

		if (!buflen)
			break;
	}

	return buf_off;
}
Example #9
static void kgsl_page_pool_zero(struct kgsl_page_pool *pool, struct page *page)
{
	int i;
	trace_kgsl_page_pool_zero_begin(pool->order);
	for (i = 0; i < (1 << pool->order); i++) {
		struct page *p;
		void *kaddr;
		p = nth_page(page, i);
		kaddr = kmap_atomic(p);
		clear_page(kaddr);
		dmac_flush_range(kaddr, kaddr + PAGE_SIZE);
		kunmap_atomic(kaddr);
	}
	trace_kgsl_page_pool_zero_end(pool->order);
}
Example #10
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}
	piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);

	return true;
}
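
__sg_page_iter_next() is the engine behind the kernel's for_each_sg_page() iterator. A minimal usage sketch, reading the current page through the .page field that this version of struct sg_page_iter exposes (later kernels replace it with the sg_page_iter_page() accessor); the helper name is ours:

#include <linux/scatterlist.h>

/* Visit every page of an SG list, however many pages each entry spans. */
static void touch_all_sg_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = piter.page;
		/* operate on `page` here */
		(void)page;
	}
}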
Example #11
/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
static void usdhi6_sg_advance(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	size_t done, total;

	/* New offset: set at the end of the previous block */
	if (host->head_pg.page) {
		/* Finished a cross-page block, jump to the new page */
		host->page_idx++;
		host->offset = data->blksz - host->head_len;
		host->blk_page = host->pg.mapped;
		usdhi6_sg_unmap(host, false);
	} else {
		host->offset += data->blksz;
		/* The completed block didn't cross a page boundary */
		if (host->offset == PAGE_SIZE) {
			/* If required, we'll map the page below */
			host->offset = 0;
			host->page_idx++;
		}
	}

	/*
	 * Now host->blk_page + host->offset point at the end of our last block
	 * and host->page_idx is the index of the page, in which our new block
	 * is located, if any
	 */

	done = (host->page_idx << PAGE_SHIFT) + host->offset;
	total = host->sg->offset + sg_dma_len(host->sg);

	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
		done, total, host->offset);

	if (done < total && host->offset) {
		/* More blocks in this page */
		if (host->offset + data->blksz > PAGE_SIZE)
			/* We have reached a block that spans 2 pages */
			usdhi6_blk_bounce(host, host->sg);

		return;
	}

	/* Finished current page or an SG segment */
	usdhi6_sg_unmap(host, false);

	if (done == total) {
		/*
		 * End of an SG segment or the complete SG: jump to the next
		 * segment, we'll map it later in usdhi6_blk_read() or
		 * usdhi6_blk_write()
		 */
		struct scatterlist *next = sg_next(host->sg);

		host->page_idx = 0;

		if (!next)
			host->wait = USDHI6_WAIT_FOR_DATA_END;
		host->sg = next;

		if (WARN(next && sg_dma_len(next) % data->blksz,
			 "SG size %u isn't a multiple of block size %u\n",
			 sg_dma_len(next), data->blksz))
			data->error = -EINVAL;

		return;
	}

	/* We cannot get here after crossing a page boundary */

	/* Next page in the same SG */
	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
	host->pg.mapped = kmap(host->pg.page);
	host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		host->mrq->cmd->opcode, host->mrq);
}