Example #1
static void rvfree(void *mem, unsigned long size)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
    struct page *page;
#endif
    unsigned long adr;

    if (!mem)
        return;

    adr = (unsigned long) mem;
    while ((long) size > 0) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
        page = vmalloc_to_page((void *)adr);
        mem_map_unreserve(page);
#else
        ClearPageReserved(vmalloc_to_page((void *)adr));
#endif
        adr += PAGE_SIZE;
        size -= PAGE_SIZE;
    }
    vfree(mem);
}
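The SetPageReserved()/ClearPageReserved() dance in helpers like rvfree() normally exists so the vmalloc'd buffer can be handed to user space. A minimal, hypothetical mmap handler sketch under that assumption (rvbuf_mmap, rvmalloc_buf and rvmalloc_buf_size are illustrative names, not from the original driver); since vmalloc memory is only virtually contiguous, each page has to be remapped individually:
/* Hypothetical sketch: expose an rvmalloc()'d buffer to user space.
 * rvmalloc_buf/rvmalloc_buf_size are illustrative globals. */
static int rvbuf_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long addr = (unsigned long)rvmalloc_buf;

	if (size > PAGE_ALIGN(rvmalloc_buf_size))
		return -EINVAL;

	/* remap one page at a time: vmalloc pages are not physically
	 * contiguous, so a single remap_pfn_range() call would be wrong */
	while (size > 0) {
		if (remap_pfn_range(vma, start, vmalloc_to_pfn((void *)addr),
				    PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
		start += PAGE_SIZE;
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}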
Example #2
static void __isp_stat_buf_sync_magic(struct ispstat *stat,
				      struct ispstat_buffer *buf,
				      u32 buf_size, enum dma_data_direction dir,
				      void (*dma_sync)(struct device *,
					dma_addr_t, unsigned long, size_t,
					enum dma_data_direction))
{
	struct device *dev = stat->isp->dev;
	struct page *pg;
	dma_addr_t dma_addr;
	u32 offset;

	/* Initial magic words */
	pg = vmalloc_to_page(buf->virt_addr);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	dma_sync(dev, dma_addr, 0, MAGIC_SIZE, dir);

	/* Final magic words */
	pg = vmalloc_to_page(buf->virt_addr + buf_size);
	dma_addr = pfn_to_dma(dev, page_to_pfn(pg));
	offset = ((u32)buf->virt_addr + buf_size) & ~PAGE_MASK;
	dma_sync(dev, dma_addr, offset, MAGIC_SIZE, dir);
}
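The dma_sync callback's prototype matches dma_sync_single_range_for_cpu()/_for_device() from the DMA API, which suggests how the helper is meant to be driven; a hedged wrapper sketch (the wrapper name is an assumption):
/* Illustrative wrapper: sync the magic words towards the device before a
 * DMA transfer; a _for_cpu twin would pass the other DMA API helper. */
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
					       struct ispstat_buffer *buf,
					       u32 buf_size,
					       enum dma_data_direction dir)
{
	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
				  dma_sync_single_range_for_device);
}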
Example #3
static void free_area(struct pstore *ps)
{
	size_t i, nr_pages;
	struct page *page;

	nr_pages = sectors_to_pages(ps->chunk_size);
	for (i = 0; i < nr_pages; i++) {
		page = vmalloc_to_page(ps->area + (i * PAGE_SIZE));
		page->list.next = NULL;
		UnlockPage(page);
	}

	vfree(ps->area);
}
Example #4
/* page fault handler */
static int map_vfault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff;	/* page offset within the vmalloc'd area */
	/* older variant, computed from the faulting address:
	 * offset = address - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT); */

	/* translate the vmalloc address to its struct page and take a
	 * reference on it for the new mapping */
	vmf->page = vmalloc_to_page(vmalloc_area + (offset << PAGE_SHIFT));
	get_page(vmf->page);
	printk("map_drv: page fault for offset 0x%lx (kseg 0x%p)\n", offset, vmalloc_area);

	return 0;
}
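A fault handler like this only takes effect once it is attached to the VMA. A hedged hookup sketch (map_vm_ops and mapdrv_mmap are assumed names; the .fault prototype matches the pre-4.11 form used above):
/* Illustrative glue: register the fault handler on mmap(). */
static const struct vm_operations_struct map_vm_ops = {
	.fault = map_vfault,
};

static int mapdrv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &map_vm_ops;
	/* keep the mapping from growing or being dumped to core files */
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}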
Example #5
static long kgsl_cache_range_op(unsigned long addr, int size,
					unsigned int flags)
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long end;
#endif
	BUG_ON(addr & (KGSL_PAGESIZE - 1));
	BUG_ON(size & (KGSL_PAGESIZE - 1));

	if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
		dmac_flush_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
		dmac_clean_range((const void *)addr,
				(const void *)(addr + size));
	else if (flags & KGSL_MEMFLAGS_CACHE_INV)
		dmac_inv_range((const void *)addr,
				(const void *)(addr + size));

#ifdef CONFIG_OUTER_CACHE
	for (end = addr; end < (addr + size); end += KGSL_PAGESIZE) {
		unsigned long physaddr;
		if (flags & KGSL_MEMFLAGS_VMALLOC_MEM) {
			physaddr = page_to_phys(vmalloc_to_page((void *)end));
		} else if (flags & KGSL_MEMFLAGS_HOSTADDR) {
			physaddr = kgsl_virtaddr_to_physaddr(end);
			if (!physaddr) {
				KGSL_MEM_ERR("Unable to find physaddr for "
					"address: %x\n", (unsigned int)end);
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}

		if (flags & KGSL_MEMFLAGS_CACHE_FLUSH)
			outer_flush_range(physaddr, physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_MEMFLAGS_CACHE_CLEAN)
			outer_clean_range(physaddr,
					physaddr + KGSL_PAGESIZE);
		else if (flags & KGSL_MEMFLAGS_CACHE_INV)
			outer_inv_range(physaddr,
					physaddr + KGSL_PAGESIZE);
	}
#endif
	return 0;
}
Example #6
static int cdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct vir_device *dev = vma->vm_private_data;
	unsigned long offset;
	void *our_addr;

	/* offset of the faulting address within the mapping */
	offset = (unsigned long)vmf->virtual_address - (unsigned long)vma->vm_start;
	if (offset >= dev->buf_len)
		return VM_FAULT_SIGBUS;	/* out of range; VM_FAULT_* codes are positive flags */

	our_addr = dev->buf + offset;		/* kernel virtual address inside the buffer */
	vmf->page = vmalloc_to_page(our_addr);	/* translate it to its struct page */
	get_page(vmf->page);			/* take a reference for the mapping */
	return 0;
}
Example #7
struct page *brick_iomap(void *data, int *offset, int *len)
{
	int _offset = ((unsigned long)data) & (PAGE_SIZE-1);
	struct page *page;
	*offset = _offset;
	if (*len > PAGE_SIZE - _offset) {
		*len = PAGE_SIZE - _offset;
	}
	if (is_vmalloc_addr(data)) {
		page = vmalloc_to_page(data);
	} else {
		page = virt_to_page(data);
	}
	return page;
}
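Because brick_iomap() clamps *len to the end of the page containing data, callers are expected to iterate; a hedged usage sketch (walk_buffer is an illustrative name, not from the original module):
/* Illustrative walker: visit "size" bytes starting at "data" as a
 * sequence of page-bounded (page, offset, len) fragments. */
static void walk_buffer(void *data, int size)
{
	while (size > 0) {
		int offset;
		int len = size;
		struct page *page = brick_iomap(data, &offset, &len);

		pr_debug("fragment: page=%p offset=%d len=%d\n",
			 page, offset, len);

		data += len;
		size -= len;
	}
}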
Example #8
static void rvfree(void *mem, unsigned long size)
{
	unsigned long adr;

	if (!mem)
		return;

	adr = (unsigned long) mem;
	while ((long) size > 0) {
		ClearPageReserved(vmalloc_to_page((void *)adr));
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vfree(mem);
}
Example #9
static void rvnail(int *mem, unsigned long size)
{
	unsigned long i;

	printk(KERN_INFO "before rvnail %lu\n", size);
	if (mem) {
		/* memset(mem, 0, size); */
		/* reserve the pages so they stay resident */
		for (i = 0; i < size; i += PAGE_SIZE)
			SetPageReserved(vmalloc_to_page((void *)((unsigned long)mem + i)));
		printk(KERN_INFO "pages have been nailed %lu\n", size);
	}
}
Example #10
static int dma_region_pagefault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct dma_region *dma = (struct dma_region *)vma->vm_private_data;

	if (!dma->kvirt)
		return VM_FAULT_SIGBUS;

	if (vmf->pgoff >= dma->n_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}
Example #11
static int spu_alloc_lscsa_std(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	lscsa = vzalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return -ENOMEM;
	csa->lscsa = lscsa;

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	return 0;
}
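The pages reserved here would have to be unreserved again before the context save area is freed; a plausible counterpart sketch mirroring the loop above (the teardown helper name is an assumption):
/* Illustrative teardown: undo SetPageReserved() before vfree(). */
static void spu_free_lscsa_std(struct spu_state *csa)
{
	unsigned char *p;

	if (!csa->lscsa)
		return;

	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
	csa->lscsa = NULL;
}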
Example #12
/*
 * Free image buffer
 */
static void free_image_buf(void)
{
	void *addr;
	int size;

	addr = image_data.data;
	size = PAGE_ALIGN(image_data.size);
	while (size > 0) {
		ClearPageReserved(vmalloc_to_page(addr));
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	vfree(image_data.data);
	image_data.data = NULL;
	image_data.status = IMAGE_INVALID;
}
Example #13
struct page *pxhs_module_buf_vm_nopage(struct vm_area_struct *vma,
				       unsigned long address,
				       int *type)
{
	unsigned int offset;
	struct page *p = 0;

	offset = (address - vma->vm_start ) + (vma->vm_pgoff << PAGE_SHIFT);

	p = vmalloc_to_page(g_module_buffer.buffer.address + offset);
	*type = VM_FAULT_MINOR;

	get_page(p);
	
	return p;
}
Example #14
static int __init mapdrv_init(void)
{
	int i, result, err;
	dev_t dev = 0;
	unsigned long addr = 0;
	
	md = kmalloc(sizeof(struct mapdrv), GFP_KERNEL);
	if (!md)
		goto fail1;
	result = alloc_chrdev_region(&dev, 0, 1, "mapdrv0");
	major = MAJOR(dev);
	if (result < 0) {
		printk(KERN_WARNING "mapdrv: can't get major %d\n", major);
		goto fail2;
	}
	cdev_init(&md->mapdev, &mapdrv_fops);
	md->mapdev.owner = THIS_MODULE;
	md->mapdev.ops = &mapdrv_fops;
	err = cdev_add (&md->mapdev, dev, 1);
	if (err) 
	{
		printk(KERN_NOTICE "Error %d adding mapdrv", err);
		goto fail3;
	}
	atomic_set(&md->usage, 0);
	/* get a memory area that is only virtually contiguous. */
	vmalloc_area = vmalloc_user(MAPLEN);
	if (!vmalloc_area)
		goto fail4;
	/* store a hello message in the buffer for user space to read */
	addr = (unsigned long)vmalloc_area;
	for (i=0; i<10; i++)
	{
		sprintf((char *)addr, "hello world from kernel space %d!", i);
		addr += PAGE_SIZE;
	}
	printk("vmalloc_area at 0x%p (phys 0x%lx)\n", vmalloc_area, page_to_pfn(vmalloc_to_page(vmalloc_area)) << PAGE_SHIFT);
	return 0;
fail4:
	cdev_del(&md->mapdev);	
fail3:
	unregister_chrdev_region(dev, 1);
fail2:
	kfree(md);
fail1:
	return -1;
}
Example #15
/*!
******************************************************************************

 @Function                SECDEV_CpuVirtAddrToCpuPAddr

******************************************************************************/
IMG_PHYSADDR SECDEV_CpuVirtAddrToCpuPAddr(
	IMG_VOID *pvCpuKmAddr
)
{
#ifdef FPGA_BUS_MASTERING
    IMG_PHYSADDR ret = 0;

    if(virt_addr_valid(pvCpuKmAddr))
    {
        /* direct mapping of kernel addresses.
         * this works for kmalloc.
         */
        ret = virt_to_phys(pvCpuKmAddr);
    }
    else
    {
        /* walk the page table.
         * Works for ioremap, vmalloc, and kmalloc(GFP_DMA),
         * but not, for some reason, kmalloc(GFP_KERNEL).
         */
        struct page * pg = vmalloc_to_page(pvCpuKmAddr);
        if(pg) {
            ret = page_to_phys(pg);
        }
        else {
            IMG_ASSERT(!"vmalloc_to_page failure");
        }
    }

    return ret;
#else
	int i;
	IMG_UINTPTR uipOffset = 0;

	for(i = 0; i < PCI_MAX_REGIONS; i++)
	{
		if (((IMG_UINTPTR)pvCpuKmAddr >= (IMG_UINTPTR)gsPCIMem[i].pvKmAddr) &&
				((IMG_UINTPTR)pvCpuKmAddr < (IMG_UINTPTR)gsPCIMem[i].pvKmAddr + gsPCIMem[i].size))
		{
			uipOffset = (IMG_UINTPTR)pvCpuKmAddr - (IMG_UINTPTR)gsPCIMem[i].pvKmAddr;
			return gsPCIMem[i].addr + (IMG_PHYSADDR)uipOffset;
		}
	}

	return 0;
#endif
}
Example #16
static int allocate_buffer(void)
{
	int i;

	if ((vmalloc_buffer = vzalloc(NPAGES * HASH_SIZE)) == NULL) {
		return -ENOMEM;
	}

	/* Set the PG_reserved bit on each page so the pages cannot be
	 * swapped out; this is done for every page of the buffer. */
	for (i = 0; i < NPAGES; i++) {
		SetPageReserved(vmalloc_to_page((void*)(((unsigned long)vmalloc_buffer)
							+ (i*HASH_SIZE))));
	}

	return 0;
}
Example #17
int module_buf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	
	if (offset >= g_module_buffer.buffer.size)
	{
		return VM_FAULT_SIGBUS;	/* out of range: signal the process rather than report OOM */
	}

	vmf->page = vmalloc_to_page((void *)g_module_buffer.buffer.address + offset);

	get_page(vmf->page);
	
	return 0;
}
Example #18
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long vstart, pstart;
	void *vtemp;
	unsigned long ln = 0;
	void (*op)(unsigned long, unsigned long, unsigned long);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		op = clean_caches;
		break;
	case ION_IOC_INV_CACHES:
		op = invalidate_caches;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		op = clean_and_invalidate_caches;
		break;
	default:
		return -EINVAL;
	}

	for (vtemp = buffer->priv_virt + offset,
	     vstart = (unsigned long) vaddr;
			ln < length;
			vtemp += PAGE_SIZE, ln += PAGE_SIZE,
			vstart += PAGE_SIZE) {
		pstart = page_to_phys(vmalloc_to_page(vtemp));
		/*
		 * If vmalloc -> page -> phys returns 0, something
		 * has really gone wrong...
		 */
		if (!pstart) {
			WARN(1, "Could not translate %p to physical address\n",
				vtemp);
			return -EINVAL;
		}

		op(vstart, PAGE_SIZE, pstart);
	}

	return 0;
}
Example #19
/**
 * Allocate video (display) memory
 * @param  size size of the memory
 * @return      pointer to the allocated memory
 */
static void *rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long adr;

	size = PAGE_ALIGN(size);
	mem = vmalloc_32(size);
	if (!mem)
		return NULL;

	memset(mem, 0, size); /* Clear the ram out, no junk to the user */
	adr = (unsigned long) mem;
	while (size > 0) {
		SetPageReserved(vmalloc_to_page((void *)adr));
		adr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return mem;
}
Example #20
int pg_dump(struct r2k_map *k_map)
{
	unsigned i, j;
	int size;
	unsigned long addr;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++)
				pg_level[i].mask |= pg_level[i].bits[j].mask;

#ifdef CONFIG_ARM
	address_markers[2].start_address = VMALLOC_START;
#endif
	ro = 1;
	walk_pgd (k_map);

	size = n_entries * sizeof (struct kernel_map_info);
	
	k_map->map_info = vmalloc (size);
	if (!k_map->map_info) {
		pr_info ("vmalloc error\n");
		return -ENOMEM;
	}

	size = PAGE_ALIGN (size);
	start_vmalloc_allocated = (unsigned long)k_map->map_info;
	end_vmalloc_allocated = start_vmalloc_allocated + size + PAGE_SIZE;

	addr = start_vmalloc_allocated;
	for (addr = start_vmalloc_allocated ; 
		addr < end_vmalloc_allocated - PAGE_SIZE; addr += PAGE_SIZE) 
		SetPageReserved (vmalloc_to_page ((void*)addr));
	
	ro = 0;
	walk_pgd (k_map);

	k_map->kernel_maps_info.size = size;
	k_map->kernel_maps_info.n_entries = entry;

	start_vmalloc_allocated = end_vmalloc_allocated = n_entries = entry = 0;
	return 0;
}
Example #21
int scullv_vma_fault(struct vm_area_struct *vma,struct vm_fault *vmf)
{
	unsigned long offset;
	struct scullv_dev *ptr, *dev = vma->vm_private_data;
	struct page *page;
	void *pageptr = NULL; /* default to "missing" */

	down(&dev->sem);
	offset = (unsigned long)(vmf->virtual_address - vma->vm_start) +
		 (vma->vm_pgoff << PAGE_SHIFT);
	if (offset >= dev->size) {		/* out of range */
		up(&dev->sem);
		return VM_FAULT_SIGBUS;
	}

	/*
	 * Now retrieve the scullv device from the list, then the page.
	 * If the device has holes, the process receives a SIGBUS when
	 * accessing the hole.
	 */
	offset >>= PAGE_SHIFT; /* offset is a number of pages */
	for (ptr = dev; ptr && offset >= dev->qset;) {
		ptr = ptr->next;
		offset -= dev->qset;
	}
	if (ptr && ptr->data) pageptr = ptr->data[offset];
	if (!pageptr) {				/* hole or end-of-file */
		up(&dev->sem);
		return VM_FAULT_SIGBUS;
	}
  
	/*
	 * After scullv lookup, "page" is now the address of the page
	 * needed by the current process. Since it's a vmalloc address,
	 * turn it into a struct page.
	 */
	page = vmalloc_to_page(pageptr);

	/* got it, now increment the count */
	get_page(page);
	vmf->page = page;
	up(&dev->sem);
	return 0;
}
Example #22
/**
 * nx_build_sg_list - build an NX scatter list describing a single  buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8           *start_addr,
			       unsigned int  len,
			       u32           sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr, if not, we need to create another nx_sg element for the
	 * data on the next page */
	for (sg = sg_head; sg_len < len; sg++) {
		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
		sg->len = sg_addr - sg->addr;
		sg_len += sg->len;

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			return NULL;
		}
	}

	/* return the moved sg_head pointer */
	return sg;
}
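A hedged caller sketch based only on the kernel-doc contract above (describe_buffer, the element count of 32, and the buffer arguments are assumptions):
/* Illustrative caller: describe one linear buffer with at most 32
 * scatter elements and report how many were actually used. */
static int describe_buffer(struct nx_sg *sg_list, u8 *data_buf,
			   unsigned int data_len)
{
	struct nx_sg *sg_end;

	sg_end = nx_build_sg_list(sg_list, data_buf, data_len, 32);
	if (!sg_end)
		return -EINVAL;		/* more than 32 elements were needed */

	return sg_end - sg_list;	/* number of elements written */
}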
Example #23
static int sel_mmap_policy_fault(struct vm_fault *vmf)
{
	struct policy_load_memory *plm = vmf->vma->vm_file->private_data;
	unsigned long offset;
	struct page *page;

	if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
		return VM_FAULT_SIGBUS;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= roundup(plm->len, PAGE_SIZE))
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(plm->data + offset);
	get_page(page);

	vmf->page = page;

	return 0;
}
Example #24
/*
 * Allocate image buffer.
 */
static int alloc_image_buf(char *buffer, size_t count)
{
	void *addr;
	int size;

	if (count < sizeof(struct image_header_t)) {
		pr_warn("FLASH: Invalid candidate image\n");
		return -EINVAL;
	}

	memcpy(&image_header, (void *)buffer, sizeof(struct image_header_t));
	image_data.size = be32_to_cpu(image_header.size);
	pr_debug("FLASH: Candiate image size = %u\n", image_data.size);

	if (image_data.size > MAX_IMAGE_SIZE) {
		pr_warn("FLASH: Too large image\n");
		return -EINVAL;
	}
	if (image_data.size < VALIDATE_BUF_SIZE) {
		pr_warn("FLASH: Image is shorter than expected\n");
		return -EINVAL;
	}

	image_data.data = vzalloc(PAGE_ALIGN(image_data.size));
	if (!image_data.data) {
		pr_err("%s : Failed to allocate memory\n", __func__);
		return -ENOMEM;
	}

	/* Pin memory */
	addr = image_data.data;
	size = PAGE_ALIGN(image_data.size);
	while (size > 0) {
		SetPageReserved(vmalloc_to_page(addr));
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	image_data.status = IMAGE_LOADING;
	return 0;
}
Example #25
static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages)
{
	struct cx23885_audio_buffer *buf = chip->buf;
	struct page *pg;
	int i;

	buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == buf->vaddr) {
		dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
		return -ENOMEM;
	}

	dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
				(unsigned long)buf->vaddr,
				nr_pages << PAGE_SHIFT);

	memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
	buf->nr_pages = nr_pages;

	buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
	if (NULL == buf->sglist)
		goto vzalloc_err;

	sg_init_table(buf->sglist, buf->nr_pages);
	for (i = 0; i < buf->nr_pages; i++) {
		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
		if (NULL == pg)
			goto vmalloc_to_page_err;
		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
	}
	return 0;

vmalloc_to_page_err:
	vfree(buf->sglist);
	buf->sglist = NULL;
vzalloc_err:
	vfree(buf->vaddr);
	buf->vaddr = NULL;
	return -ENOMEM;
}
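After the scatterlist is populated, the driver would normally hand it to the streaming DMA API; a minimal hedged continuation (the helper name and the chip->pci and buf->sglen fields are assumptions):
/* Illustrative follow-up: map the sglist built above so the device
 * can DMA into the vmalloc_32() buffer. */
static int cx23885_alsa_dma_map(struct cx23885_audio_dev *chip)
{
	struct cx23885_audio_buffer *buf = chip->buf;

	buf->sglen = dma_map_sg(&chip->pci->dev, buf->sglist,
				buf->nr_pages, DMA_FROM_DEVICE);
	if (!buf->sglen) {
		dprintk(1, "dma_map_sg failed\n");
		return -ENOMEM;
	}
	return 0;
}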
Example #26
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
Example #27
struct dl_dma_list* 
dl_dma_map_kernel_buffer(void *address, unsigned long size, int direction, int is_vmalloc, void* pdev)
{
	struct page* page;
	int i = 0, offset = 0;
	struct dl_dma_list* sl 			= NULL;
	struct dl_dma_entry *e			= NULL;
	unsigned long num_pages 		= dl_dma_get_num_pages(address, size);
	unsigned long start_addr		= (unsigned long)address;

	start_addr = start_addr - (start_addr % PAGE_SIZE);

	sl = alloc_dl_dma_entry(is_vmalloc == 1 ? num_pages : 1);
	if (!sl)
		return NULL;

	e = first_entry(sl);
	direction = bmd_to_linux_direction(direction);
	
	if (is_vmalloc)
	{
		for (i = 0; i < num_pages; i++)
		{
			page = vmalloc_to_page((void*)(unsigned long)start_addr + offset);
			offset += PAGE_SIZE;
			e->dma_addr = pci_map_page(pdev, page, 0, PAGE_SIZE, direction);
			e = next_entry(e);
		}
		sl->num_pages = num_pages;
	}
	else
	{
		e->dma_addr = pci_map_single(pdev, address, size, direction);
		sl->dma_is_single = 1;
		sl->size = size;
	}
	sl->pdev = pdev;	
	return sl;
}
Example #28
/**
 * @param size Size of the memory to allocate
 *
 * @returns Address of the allocated memory
 *
 * @brief Allocate a buffer.
 *
 * This function allocates a buffer in memory.
 */
void * linect_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem = vmalloc(size);

	if (!mem)
		return NULL;

	memset(mem, 0, size);

	addr = (unsigned long) mem;

	while (size > 0) {
		SetPageReserved(vmalloc_to_page((void *) addr));
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	return mem;
}
Example #29
/*
 * nopage() vm_op implementation for relayfs file mapping.
 */
static struct page *relay_buf_nopage(struct vm_area_struct *vma,
				     unsigned long address,
				     int *type)
{
	struct page *page;
	struct rchan_buf *buf = vma->vm_private_data;
	unsigned long offset = address - vma->vm_start;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!buf)
		return NOPAGE_OOM;

	page = vmalloc_to_page(buf->start + offset);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}
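On the old kernels that still use the ->nopage interface, the handler is wired up when the relay file is mmap'ed; a hedged hookup sketch (the mmap helper name is an assumption):
/* Illustrative hookup for the legacy ->nopage callback. */
static struct vm_operations_struct relay_file_mmap_ops = {
	.nopage = relay_buf_nopage,
};

static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
{
	vma->vm_ops = &relay_file_mmap_ops;
	vma->vm_private_data = buf;
	return 0;
}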
Example #30
/* this is videobuf_vmalloc_to_sg() from videobuf-dma-sg.c
   make sure virt has been allocated with vmalloc_32(), otherwise the BUG()
   may be triggered on highmem machines */
static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
    struct scatterlist *sglist;
    struct page *pg;
    int i;

    sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
    if (NULL == sglist)
        return NULL;
    sg_init_table(sglist, nr_pages);
    for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
        pg = vmalloc_to_page(virt);
        if (NULL == pg)
            goto err;
        BUG_ON(PageHighMem(pg));
        sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
    }
    return sglist;

err:
    kfree(sglist);
    return NULL;
}