Example #1
static struct nvos_pagemap *nv_alloc_pages(unsigned int count,
        pgprot_t prot, bool contiguous, int create_mapping)
{
    struct nvos_pagemap *pm;
    size_t size;
    unsigned int i = 0;

    size = sizeof(struct nvos_pagemap) + sizeof(struct page *)*(count-1);
    pm = kzalloc(size, GFP_KERNEL);
    if (!pm)
        return NULL;

    if (count==1) contiguous = true;

    if (contiguous) {
        size_t order = get_order(count << PAGE_SHIFT);
        struct page *compound_page;
        compound_page = alloc_pages(nv_gfp_pool, order);
        if (!compound_page) goto fail;

        split_page(compound_page, order);
        for (i=0; i<count; i++)
            pm->pages[i] = nth_page(compound_page, i);

        /* return the pages beyond 'count' left over from the split allocation */
        for ( ; i < (1<<order); i++)
            __free_page(nth_page(compound_page, i));
        i = count; /* only the first 'count' pages remain for the error path */
    } else {
        for (i=0; i<count; i++) {
            pm->pages[i] = alloc_page(nv_gfp_pool);
            if (!pm->pages[i]) goto fail;
        }
    }

    if (create_mapping) {
        /* since the linear kernel mapping uses sections and super-
         * sections rather than PTEs, it's not possible to overwrite
         * it with the correct caching attributes, so use a local
         * mapping */
        pm->addr = vm_map_ram(pm->pages, count, -1, prot);
        if (!pm->addr) {
            pr_err("nv_alloc_pages fail to vmap contiguous area\n");
            goto fail;
        }
    }

    pm->nr_pages = count;
    for (i=0; i<count; i++) {
        SetPageReserved(pm->pages[i]);
        pagemap_flush_page(pm->pages[i]);
    }

    return pm;

fail:
    while (i)
        __free_page(pm->pages[--i]);
    kfree(pm); /* pm is non-NULL on every path that reaches this label */
    return NULL;
}
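A mapping created with vm_map_ram() must be released with vm_unmap_ram() before the pages themselves are freed. A minimal teardown sketch matching the allocator above (the name nv_free_pages is assumed here, not taken from the source):

static void nv_free_pages(struct nvos_pagemap *pm)
{
    unsigned int i;

    /* drop the local mapping first, if one was created */
    if (pm->addr)
        vm_unmap_ram(pm->addr, pm->nr_pages);

    for (i = 0; i < pm->nr_pages; i++) {
        ClearPageReserved(pm->pages[i]);
        __free_page(pm->pages[i]);
    }
    kfree(pm);
}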
Example #2
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
				ioremap_nocache(nums[0] << PAGE_SHIFT, size);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
					PAGE_KERNEL);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}
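The matching release path must undo whichever mapping was chosen above: iounmap() for the contiguous-PFN branch, vm_unmap_ram() for the page-backed branch, unmapping from the page-aligned address since a sub-page offset was folded into buf->vaddr. A hedged sketch of that counterpart (the function name is invented, and is_pfns is assumed to be the frame_vector flag distinguishing the two cases):

static void vb2_vmalloc_put_userptr_sketch(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;

	if (!buf->vec->is_pfns) {
		/* page-backed: tear down the vm_map_ram() mapping */
		if (vaddr)
			vm_unmap_ram((void *)vaddr, frame_vector_count(buf->vec));
	} else {
		/* physically contiguous: undo ioremap_nocache() */
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}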
Example #3
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}
Example #4
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}
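Either branch above needs a matching unmap on release: dma_buf_vunmap() when the mapping came from an attached dmabuf, vm_unmap_ram() otherwise. A sketch of that teardown, modeled on the fields used above rather than lifted from the driver:

static void vb2_dma_sg_unmap_vaddr(struct vb2_dma_sg_buf *buf)
{
	if (!buf->vaddr)
		return;

	if (buf->db_attach)
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
	else
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	buf->vaddr = NULL;
}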
Example #5
int __init owl_kinfo_init(void)
{
    struct page **pages;
    unsigned int pages_count, pfn, i;
    void *kinfo_vaddr;

    kinfo = kmalloc(sizeof(struct kernel_reserve_info), GFP_KERNEL);
    if (!kinfo) {
        printk(KERN_ALERT "%s, kmalloc(%zu) for kinfo failed!\n",
                __func__, sizeof(struct kernel_reserve_info));
        return -ENOMEM;
    }
    
    pages_count = owl_kinfo_size >> PAGE_SHIFT;
    pages = kmalloc(sizeof(*pages) * pages_count, GFP_KERNEL);
    if (!pages) {
        printk(KERN_ALERT "%s, kmalloc(%zu) for pages failed!\n",
                __func__, sizeof(*pages) * pages_count);
        kfree(kinfo);
        return -ENOMEM;
    }
    pfn = PFN_DOWN(owl_kinfo_start);
    for (i = 0; i < pages_count; ++i)
        pages[i] = pfn_to_page(pfn + i);
    kinfo_vaddr = vm_map_ram(pages, pages_count, -1, PAGE_KERNEL);
    if (!kinfo_vaddr) {
        printk(KERN_ALERT "%s, vm_map_ram(0x%x, 0x%x) for kinfo failed!\n",
                __func__, owl_kinfo_start, owl_kinfo_size);
        kfree(pages);
        kfree(kinfo);
        return -ENOMEM;
    }
    memcpy(kinfo, kinfo_vaddr, sizeof(struct kernel_reserve_info));
    vm_unmap_ram(kinfo_vaddr, pages_count);
    kfree(pages);
    
    free_owl_reserved_memory(owl_kinfo_start, owl_kinfo_size);
    return 0;
}
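vm_map_ram() can fail transiently when vmap address space is fragmented, because lazily-freed mappings still occupy it. Some callers (xfs, for example) retry once after flushing stale aliases; a sketch of that pattern with the variables above:

    int retried = 0;

    do {
        kinfo_vaddr = vm_map_ram(pages, pages_count, -1, PAGE_KERNEL);
        if (kinfo_vaddr)
            break;
        /* flush lazily-freed vmap blocks to reclaim address space */
        vm_unmap_aliases();
    } while (retried++ == 0);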
Example #6
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
}
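Note the third argument: this caller pins the mapping to NUMA node 0, while the other examples pass -1 (NUMA_NO_NODE) to let the allocator pick a node. The vunmap counterpart for the mock is the usual one-liner; a sketch:

static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}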
Example #7
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->write = write;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;
		buf->vaddr = ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		first = vaddr >> PAGE_SHIFT;
		last  = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 write, 1, /* force */
					 buf->pages, NULL);
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %d/%d]\n", n_pages,
		 buf->n_pages);
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	kfree(buf);

	return NULL;
}