Example #1
File: kcov.c Project: plaes/linux
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}
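Most of the examples collected here follow the same shape as kcov_mmap() above: allocate a user-mappable buffer, then hand it to the VMA one page at a time with vm_insert_page(). Below is a minimal sketch of that pattern, not taken from any of the listed projects; mydev_mmap() and MYDEV_BUF_SIZE are invented names, and the direct vm_flags assignment assumes a kernel older than 6.3 (newer kernels use vm_flags_set()).

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define MYDEV_BUF_SIZE (16 * PAGE_SIZE)	/* hypothetical fixed buffer size */

static int mydev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long off;
	void *buf;
	int ret;

	/* Only allow a mapping of the whole buffer at offset 0. */
	if (vma->vm_pgoff || size != MYDEV_BUF_SIZE)
		return -EINVAL;

	/* vmalloc_user() returns zeroed pages suitable for vm_insert_page(). */
	buf = vmalloc_user(size);
	if (!buf)
		return -ENOMEM;

	vma->vm_flags |= VM_DONTEXPAND;

	for (off = 0; off < size; off += PAGE_SIZE) {
		ret = vm_insert_page(vma, vma->vm_start + off,
				     vmalloc_to_page(buf + off));
		if (ret) {
			/*
			 * vm_insert_page() took a reference on each page it
			 * already inserted, so freeing the vmalloc area here is
			 * safe; the partially set-up VMA is torn down by the
			 * mmap caller when we return an error.
			 */
			vfree(buf);
			return ret;
		}
	}

	vma->vm_private_data = buf;	/* freed later, e.g. from vm_ops->close() */
	return 0;
}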
Example #2
int udl_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, vmf->address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
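In the udl fault handler above, the errno from vm_insert_page() has to be translated into a VM_FAULT_* code by hand. Kernels from about 4.20 onward also provide vmf_insert_page(), which returns a vm_fault_t directly; the sketch below shows the same handler shape using it. struct mydrv_bo and mydrv_fault() are invented names, not part of the udl driver.

#include <linux/mm.h>

struct mydrv_bo {			/* hypothetical buffer object */
	struct page **pages;		/* populated when the object is pinned */
	unsigned long num_pages;
};

static vm_fault_t mydrv_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mydrv_bo *bo = vma->vm_private_data;
	pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (!bo->pages || page_offset >= bo->num_pages)
		return VM_FAULT_SIGBUS;

	/* vmf_insert_page() maps errno values to VM_FAULT_* codes itself. */
	return vmf_insert_page(vma, vmf->address, bo->pages[page_offset]);
}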
Example #3
static int vperfctr_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct vperfctr *perfctr;

#ifdef CONFIG_ARM
#define _PAGE_RW	L_PTE_WRITE
#endif
	/* Only allow read-only mapping of first page. */
	if ((vma->vm_end - vma->vm_start) != PAGE_SIZE ||
	    vma->vm_pgoff != 0 ||
	    (pgprot_val(vma->vm_page_prot) & _PAGE_RW) ||
	    (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)))
		return -EPERM;
	perfctr = filp->private_data;
	if (!perfctr)
		return -EPERM;
	/* 2.6.29-rc1 changed arch/x86/mm/pat.c to WARN_ON when
	   remap_pfn_range() is applied to plain RAM pages.
	   Comments there indicate that one should set_memory_wc()
	   before the remap, but that doesn't silence the WARN_ON.
	   Luckily vm_insert_page() works without complaints. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
	return vm_insert_page(vma, vma->vm_start, virt_to_page((unsigned long)perfctr));
#else
	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(perfctr) >> PAGE_SHIFT,
			       PAGE_SIZE, vma->vm_page_prot);
#endif
}
Example #4
int omapvout_mem_map(struct vm_area_struct *vma, u32 phy_addr)
{
	struct page *cpage;
	void *pos;
	u32 start;
	u32 size;


	vma->vm_flags |= VM_RESERVED;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	pos = (void *) phy_addr;
	start = vma->vm_start;
	size = (vma->vm_end - vma->vm_start);

	while (size > 0) {
		cpage = pfn_to_page(((unsigned int)pos) >> PAGE_SHIFT);
		if (vm_insert_page(vma, start, cpage)) {
			printk(KERN_ERR "Failed to insert page to VMA \n");
			return -EAGAIN;
		}
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */

	return 0;
}
Example #5
File: gem.c Project: JaneDu/ath
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}
Example #6
static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *pos;
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct video_device *vdev = video_devdata(file);
	struct zr364xx_camera *cam;

	DBG("zr364xx_mmap: %ld\n", size);

	if (vdev == NULL)
		return -ENODEV;
	cam = video_get_drvdata(vdev);

	pos = cam->framebuf;
	while (size > 0) {
		if (vm_insert_page(vma, start, vmalloc_to_page(pos)))
			return -EAGAIN;
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	return 0;
}
Example #7
static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;
	unsigned long count = vma_pages(vma);
	unsigned int i;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || count > limit ||
	    file_priv->allocated + count > limit)
		return -EINVAL;

	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
			   GFP_KERNEL);
	if (!vma_priv)
		return -ENOMEM;

	vma_priv->n_pages = count;
	count = 0;
	for (i = 0; i < vma_priv->n_pages; i++) {
		vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!vma_priv->pages[i])
			break;
		count++;
	}

	mutex_lock(&file_priv->lock);

	file_priv->allocated += count;

	vma_priv->file_priv = file_priv;
	vma_priv->users = 1;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	vma->vm_ops = &privcmd_buf_vm_ops;
	vma->vm_private_data = vma_priv;

	list_add(&vma_priv->list, &file_priv->list);

	if (vma_priv->n_pages != count)
		ret = -ENOMEM;
	else
		for (i = 0; i < vma_priv->n_pages; i++) {
			ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
					     vma_priv->pages[i]);
			if (ret)
				break;
		}

	if (ret)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);

	return ret;
}
Example #8
static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
			   struct tgt_ring *ring)
{
	int i, err;

	for (i = 0; i < TGT_RING_PAGES; i++) {
		struct page *page = virt_to_page(ring->tr_pages[i]);
		err = vm_insert_page(vma, addr, page);
		if (err)
			return err;
		addr += PAGE_SIZE;
	}

	return 0;
}
Example #9
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
	unsigned long uaddr;
	int i, retval;

	uaddr = vma->vm_start;
	for (i = 0; i < buffer->page_count; i++) {
		retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
		if (retval)
			return retval;
		uaddr += PAGE_SIZE;
	}

	return 0;
}
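fw_iso_buffer_map(), like several of the loops above, returns as soon as one vm_insert_page() call fails and leaves the partially populated VMA for the mmap caller to tear down. A driver that prefers to clean up explicitly can zap the partial range itself, as the trans_mmap example further down does. A hedged sketch of such a helper follows; map_pages_or_zap() is an invented name, not taken from any of the drivers on this page.

#include <linux/mm.h>

/*
 * Insert an array of pages into a VMA starting at vm_start; on failure,
 * zap whatever was already inserted so the VMA is left empty again.
 */
static int map_pages_or_zap(struct vm_area_struct *vma,
			    struct page **pages, unsigned long npages)
{
	unsigned long addr = vma->vm_start;
	unsigned long i;
	int err;

	for (i = 0; i < npages; i++, addr += PAGE_SIZE) {
		err = vm_insert_page(vma, addr, pages[i]);
		if (err) {
			zap_vma_ptes(vma, vma->vm_start, addr - vma->vm_start);
			return err;
		}
	}

	return 0;
}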
Example #10
static int
blktap_ring_mmap_sring(struct blktap *tap, struct vm_area_struct *vma)
{
	struct blktap_ring *ring = &tap->ring;
	struct blktap_sring *sring;
	struct page *page = NULL;
	int err;

	if (ring->vma)
		return -EBUSY;

	page = alloc_page(GFP_KERNEL|__GFP_ZERO);
	if (!page)
		return -ENOMEM;

	SetPageReserved(page);

	err = vm_insert_page(vma, vma->vm_start, page);
	if (err)
		goto fail;

	sring = page_address(page);
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&ring->ring, sring, PAGE_SIZE);

	ring->ring_vstart = vma->vm_start;
	ring->user_vstart = ring->ring_vstart + PAGE_SIZE;

	vma->vm_private_data = tap;

	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_RESERVED;

	vma->vm_ops = &blktap_ring_vm_operations;

	ring->vma = vma;
	return 0;

fail:
	if (page) {
		ClearPageReserved(page);
		__free_page(page);
	}

	return err;
}
Example #11
static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma)
{
	int err;

	struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page);

	TRACE_CUR(CTRL_NAME
		  ": mapping %p (pfn:%lx) to 0x%lx (prot:%lx)\n",
		  tsk_rt(t)->ctrl_page,page_to_pfn(ctrl), vma->vm_start,
		  vma->vm_page_prot);

	/* Map it into the vma. */
	err = vm_insert_page(vma, vma->vm_start, ctrl);

	if (err)
		TRACE_CUR(CTRL_NAME ": vm_insert_page() failed (%d)\n", err);

	return err;
}
Example #12
static int
blktap_ring_mmap_request(struct blktap *tap,
			 struct vm_area_struct *vma)
{
	struct blktap_ring *ring = &tap->ring;
	struct blktap_request *request;
	int usr_idx, seg, err;
	unsigned long addr, n_segs;

	usr_idx  = vma->vm_pgoff - 1;
	seg      = usr_idx % BLKTAP_SEGMENT_MAX;
	usr_idx /= BLKTAP_SEGMENT_MAX;

	request = ring->pending[usr_idx];
	if (!request)
		return -EINVAL;

	n_segs = request->nr_pages - seg;
	n_segs = min(n_segs, vma_pages(vma));

	for (addr = vma->vm_start;
	     seg < n_segs;
	     seg++, addr += PAGE_SIZE) {
		struct page *page = request->pages[seg];

		dev_dbg(tap->ring.dev,
			"mmap request %d seg %d addr %lx\n",
			usr_idx, seg, addr);

		err = vm_insert_page(vma, addr, page);
		if (err)
			return err;
	}

	vma->vm_flags |= VM_DONTCOPY;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
Example #13
static int
trans_mmap(struct file *f, struct vm_area_struct *vma)
{
    struct trans_channel *c = f->private_data;
    struct page *pg = NULL;
    unsigned long addr, sz, pages;
    int i;
    BUG_ON(!c);

    printl("trans_mmap, sz %d\n", vma->vm_end - vma->vm_start);
    sz = vma->vm_end - vma->vm_start;
    pages = sz/PAGE_SIZE;
    if (sz > TRANS_MAX_MAPPING) return -EINVAL;

    for (addr = vma->vm_start, i = 0 ;
            addr < vma->vm_end ;
            addr += PAGE_SIZE, i++) {
        pg = virt_to_page(&c->mem[PAGE_SIZE*i]);
        BUG_ON(!pg);

        if (vm_insert_page(vma, addr, pg)) {
            zap_vma_ptes(vma, vma->vm_start, addr - vma->vm_start);
            goto err;
        }
        //BUG_ON(pg != follow_page(vma, addr, 0));
    }
    vma->vm_flags |= (VM_RESERVED | VM_INSERTPAGE);
    vma->vm_ops = &trans_vmops;

    BUG_ON(vma->vm_private_data);
    vma->vm_private_data = c;
    c->size = sz;

    return 0;
err:
    return -EAGAIN;
}
Example #14
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);


	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
Example #15
int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	IMG_HANDLE hPMRResmanHandle;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(PVR_DRM_FILE_FROM_FILE(pFile));
#else
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif

#if defined(PVR_MMAP_USE_VM_INSERT)
	IMG_BOOL bMixedMap = IMG_FALSE;
#endif
	/*
	 * The PMR lock is used here to protect both handle-related operations and
	 * PMR operations.
	 * This was introduced to fix a lockdep issue.
	 */
	mutex_lock(&g_sMMapMutex);
	PMRLock();

#if defined(SUPPORT_DRM_DC_MODULE)
	psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
	if (!psPMR)
#endif
	{
		hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

		eError = PVRSRVLookupHandle(psConnection->psHandleBase,
					    (IMG_HANDLE *) &hPMRResmanHandle,
					    hSecurePMRHandle,
					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}

		eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
						    (void **)&psPMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}
	}

	/*
	 * Take a reference on the PMR; this makes sure that it can't be freed
	 * while it's mapped into the user process
	 */
	PMRRefPMR(psPMR);

	PMRUnlock();

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e1;
	}

	/*
	 * We ought to call PMR_Flags() here to check the permissions
	 * against the requested mode, and possibly to set up the cache
	 * control protflags
	 */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard user read/write request, we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined (CONFIG_ARM64)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_ARM)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_X86)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
				sPageProt = pgprot_noncached(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
				sPageProt = pgprot_writecombine(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
				break;

		default:
				eError = PVRSRV_ERROR_INVALID_PARAMS;
				goto e1;
	}
	ps_vma->vm_page_prot = sPageProt;

    uiLength = ps_vma->vm_end - ps_vma->vm_start;

    ps_vma->vm_flags |= VM_IO;

/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
    ps_vma->vm_flags |= VM_DONTDUMP;
#else
    ps_vma->vm_flags |= VM_RESERVED;
#endif

    /*
     * Disable mremap because our nopage handler assumes all
     * page requests have already been validated.
     */
    ps_vma->vm_flags |= VM_DONTEXPAND;
    
    /* Don't allow mapping to be inherited across a process fork */
    ps_vma->vm_flags |= VM_DONTCOPY;

#if defined(PVR_MMAP_USE_VM_INSERT)
	{
		/* Scan the map range for pfns without struct page* handling. If we find
		 * one, this is a mixed map, and we can't use vm_insert_page().
		 */
		for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
		{
			IMG_CPU_PHYADDR sCpuPAddr;
			IMG_BOOL bValid;

			eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
			PVR_ASSERT(eError == PVRSRV_OK);
			if (eError)
			{
				goto e2;
			}

			if (bValid)
			{
				uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
				{
					bMixedMap = IMG_TRUE;
				}
			}
		}

		if (bMixedMap)
		{
		    ps_vma->vm_flags |= VM_MIXEDMAP;
		}
	}
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

    for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
    {
        IMG_SIZE_T uiNumContiguousBytes;
        IMG_INT32 iStatus;
        IMG_CPU_PHYADDR sCpuPAddr;
        IMG_BOOL bValid;

        uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
        eError = PMR_CpuPhysAddr(psPMR,
                                 uiOffset,
                                 &sCpuPAddr,
                                 &bValid);
        PVR_ASSERT(eError == PVRSRV_OK);
        if (eError)
        {
            goto e2;
        }

		/*
			Only map in pages that are valid; any that aren't will be picked up
			by the nopage handler which will return a zeroed page for us
		*/
		if (bValid)
		{
	        uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
	        PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

#if defined(PVR_MMAP_USE_VM_INSERT)
			if (bMixedMap)
			{
				/* This path is just for debugging. It should be equivalent
				 * to the remap_pfn_range() path.
				 */
				iStatus = vm_insert_mixed(ps_vma,
										  ps_vma->vm_start + uiOffset,
										  uiPFN);
			}
			else
			{
				iStatus = vm_insert_page(ps_vma,
										 ps_vma->vm_start + uiOffset,
										 pfn_to_page(uiPFN));
			}
#else /* defined(PVR_MMAP_USE_VM_INSERT) */
	        iStatus = remap_pfn_range(ps_vma,
	                                  ps_vma->vm_start + uiOffset,
	                                  uiPFN,
	                                  uiNumContiguousBytes,
	                                  ps_vma->vm_page_prot);
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

	        PVR_ASSERT(iStatus == 0);
	        if(iStatus)
	        {
	            // N.B. not the right error code, but, it doesn't get propagated anyway... :(
	            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	
	            goto e2;
	        }

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
    /* USER MAPPING*/
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
	    PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
    	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
    			 	 	 	 	 (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
								 sCpuPAddr,
								 PAGE_SIZE,
								 IMG_NULL);
#endif
#endif
		}
        (void)pFile;
    }

    /* let us see the PMR so we can unlock it later */
    ps_vma->vm_private_data = psPMR;

    /* Install open and close handlers for ref-counting */
    ps_vma->vm_ops = &gsMMapOps;

	mutex_unlock(&g_sMMapMutex);

    return 0;

    /*
      error exit paths follow
    */
 e2:
    PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
    PMRUnlockSysPhysAddresses(psPMR);
 e1:
	PMRUnrefPMR(psPMR);
	goto em1;
 e0:
    PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
	PMRUnlock();
 em1:
    PVR_ASSERT(eError != PVRSRV_OK);
    PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	mutex_unlock(&g_sMMapMutex);

    return -ENOENT; // -EAGAIN // or what?
}
Example #16
static size_t
MksckPageDescMap(Mksck_PageDesc *pd,
		 uint32 pages,
		 struct iovec *iov,
		 int iovCount,
		 struct vm_area_struct *vma)
{
	size_t payloadLen = 0;
	uint32 i;

	for (i = 0; i < pages && pd[i].mpn != INVALID_MPN; ++i) {
		uint32 j;

		for (j = 0; j < 1 << pd[i].order; ++j) {
			HUVA huva = 0;
			struct page *page;
			MPN currMPN = pd[i].mpn + j;

			while (iovCount > 0 && iov->iov_len == 0) {
				iovCount--;
				iov++;
			}

			if (iovCount == 0) {
				pr_warn("MksckPageDescMap: Invalid " \
					"iov length\n");
				goto map_done;
			}

			huva = (HUVA)iov->iov_base;

			if (huva & (PAGE_SIZE - 1) ||
			    iov->iov_len < PAGE_SIZE) {
				pr_warn("MksckPageDescMap: Invalid huva %x " \
					"or iov_len %d\n", huva, iov->iov_len);
				goto map_done;
			}

			if (vma == NULL || huva < vma->vm_start ||
			    huva >= vma->vm_end) {
				vma = find_vma(current->mm, huva);

				if (vma == NULL ||
				    huva < vma->vm_start ||
				    vma->vm_ops != &mksckVMOps) {
					pr_warn("MksckPageDescMap: " \
						"Invalid vma\n");
					goto map_done;
				}
			}

			if (!pfn_valid(currMPN)) {
				pr_warn("MksckPageDescMap: Invalid MPN %x\n",
					currMPN);
			} else {
				int rc;

				page = pfn_to_page(currMPN);

				rc = vm_insert_page(vma, huva, page);
				if (rc) {
					pr_warn("MksckPageDescMap: Failed to " \
						"insert %x at %x, error %d\n",
						currMPN, huva, rc);
					goto map_done;
				}

				ASSERT(iov->iov_len >= PAGE_SIZE);
				iov->iov_base += PAGE_SIZE;
				iov->iov_len -= PAGE_SIZE;
			}

			payloadLen += PAGE_SIZE;
		}
	}

map_done:
	return payloadLen;
}
Example #17
static int cfag12864bfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return vm_insert_page(vma, vma->vm_start,
		virt_to_page(cfag12864b_buffer));
}
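The cfag12864b handler above maps a single page and trusts user space to request exactly one page at offset 0. A hedged variant that validates the request first is sketched below; onepage_fb_mmap() is an invented name and the extern declaration stands in for the driver's own header.

#include <linux/fb.h>
#include <linux/mm.h>

/* From the cfag12864b example above; assumed to be a single page. */
extern unsigned char *cfag12864b_buffer;

static int onepage_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	/* The backing buffer is a single page, so reject anything else. */
	if (vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	return vm_insert_page(vma, vma->vm_start,
			      virt_to_page(cfag12864b_buffer));
}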
Example #18
int MMapPMR(struct file* pFile, struct vm_area_struct* ps_vma)
{
    PVRSRV_ERROR eError;
    IMG_HANDLE hSecurePMRHandle;
    IMG_SIZE_T uiLength;
    IMG_DEVMEM_OFFSET_T uiOffset;
    unsigned long uiPFN;
    IMG_HANDLE hPMRResmanHandle;
    PMR *psPMR;
    PMR_FLAGS_T ulPMRFlags;
    IMG_UINT32 ui32CPUCacheFlags;
    unsigned long ulNewFlags = 0;
    pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
    // INTEL_TEMP
    // SINCE PVR_DRM_FILE_FROM_FILE is NOT found
    CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile->private_data);

    // INTEL_TEMP
    // SINCE PVR_DRM_FILE_FROM_FILE is NOT found
	//if (ps_vma->vm_pgoff > INT_MAX)
	//{
	//	ps_vma->vm_pgoff -= ((unsigned int)INT_MAX + 1);

	//	return MMapGEM(pFile, ps_vma);
	//}
#else
    CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif
	/*
	 * Both PVRSRVLookupHandle and ResManFindPrivateDataByPtr
	 * require the bridge mutex to be held for thread safety.
	 */
	LinuxLockMutex(&gPVRSRVLock);
	LinuxLockMutex(&g_sMMapMutex);

	hSecurePMRHandle=(IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
                                (IMG_HANDLE *) &hPMRResmanHandle,
                                hSecurePMRHandle,
                                PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	if (eError != PVRSRV_OK)
	{
        goto e0;
	}

    eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
                                        (IMG_VOID **)&psPMR);
	if (eError != PVRSRV_OK)
	{
        goto e0;
	}

	/*
		Take a reference on the PMR; this makes sure that it can't be freed
		while it's mapped into the user process
	*/
	PMRRefPMR(psPMR);

	LinuxUnLockMutex(&gPVRSRVLock);

    eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
        goto e1;
	}

    if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	((ps_vma->vm_flags & VM_SHARED) == 0))
    {
	eError = PVRSRV_ERROR_INVALID_PARAMS;
	goto e1;
    }

    /*
      we ought to call PMR_Flags() here to check the permissions
      against the requested mode, and possibly to set up the cache
      control protflags
    */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
        goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard user read/write request, we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined(__arm__)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(__i386__) || defined(__x86_64)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot,
							   vm_get_page_prot(ulNewFlags));
#elif defined(__metag__)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
				sPageProt = pgprot_noncached(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
				sPageProt = pgprot_writecombine(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
				break;

		default:
				eError = PVRSRV_ERROR_INVALID_PARAMS;
				goto e1;
	}
	ps_vma->vm_page_prot = sPageProt;

    uiLength = ps_vma->vm_end - ps_vma->vm_start;

    for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
    {
        IMG_SIZE_T uiNumContiguousBytes;
        IMG_INT32 iStatus;
        IMG_CPU_PHYADDR sCpuPAddr;
        IMG_BOOL bValid;
	struct page *psPage = NULL;

        uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
        eError = PMR_CpuPhysAddr(psPMR,
                                 uiOffset,
                                 &sCpuPAddr,
                                 &bValid);
        PVR_ASSERT(eError == PVRSRV_OK);
        if (eError)
        {
            goto e2;
        }

		/*
			Only map in pages that are valid; any that aren't will be picked up
			by the nopage handler which will return a zeroed page for us
		*/
		if (bValid)
		{
	        uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
	        PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

		PVR_ASSERT(pfn_valid(uiPFN));
		psPage = pfn_to_page(uiPFN);
		iStatus = vm_insert_page(ps_vma,
				ps_vma->vm_start + uiOffset,
				psPage);

	        PVR_ASSERT(iStatus == 0);
	        if(iStatus)
	        {
	            // N.B. not the right error code, but, it doesn't get propagated anyway... :(
	            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	
	            goto e2;
	        }
		}
        (void)pFile;
    }

    ps_vma->vm_flags |= VM_IO;

/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
    ps_vma->vm_flags |= VM_DONTDUMP;
#else
    ps_vma->vm_flags |= VM_RESERVED;
#endif

    /*
     * Disable mremap because our nopage handler assumes all
     * page requests have already been validated.
     */
    ps_vma->vm_flags |= VM_DONTEXPAND;
    
    /* Don't allow mapping to be inherited across a process fork */
    ps_vma->vm_flags |= VM_DONTCOPY;

    /* let us see the PMR so we can unlock it later */
    ps_vma->vm_private_data = psPMR;

    /* Install open and close handlers for ref-counting */
    ps_vma->vm_ops = &gsMMapOps;

	LinuxUnLockMutex(&g_sMMapMutex);

    return 0;

    /*
      error exit paths follow
    */
 e2:
    PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
    PMRUnlockSysPhysAddresses(psPMR);
 e1:
	PMRUnrefPMR(psPMR);
	goto em1;
 e0:
	LinuxUnLockMutex(&gPVRSRVLock);
 em1:
    PVR_ASSERT(eError != PVRSRV_OK);
    PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	LinuxUnLockMutex(&g_sMMapMutex);

    return -ENOENT; // -EAGAIN // or what?
}
Example #19
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
Example #20
static int ls027b7dh01_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	return vm_insert_page(vma, vma->vm_start,
			      virt_to_page(info->screen_base));
}
Example #21
// for anonymous mapping
OCL_kernel_struct* gum_get_gctr_pages(struct super_block* sb, struct page ** IVs, 
				     struct page ** keys, struct page ** res, unsigned int count){
	struct GSFS_sb* gsb=(struct GSFS_sb*)sb->s_fs_info;
	OCL_kernel_struct *oks=0;
	
	down(&gsb->gum_struct.gum_struct_sem);
	
	if(likely(gsb->gum_struct.gum_pid)){
		struct vm_area_struct *vma=gsb->gum_struct.gum_vma;
		unsigned long	address=gsb->gum_struct.gum_start_address;
		int 	i;
		unsigned int 	count_pages;
		struct ocl_message *mes=gsb->gum_struct.gum_ocl_mes;
		
		gt(printk("<0>" "gum_get_gctr_pages ** vma: %lx, address: %lx\n",(unsigned long)vma, (unsigned long) address));
		
		count_pages=count/keys_per_page;
		if(count%IVs_per_page)
			count_pages++;
		vma->vm_flags |= VM_INSERTPAGE;
		mes->IVs_start_address=address;
		for(i=0; i<count_pages; i++){
			if(likely(IVs[i])){
				//int k=
				//get_page(IVs[i]);
				//printk("<0>" "IVs k: %d, address: %lx, page: %lx, %d\n",0,address, IVs[i], atomic_read(&IVs[i]->_count));
				vm_insert_page(vma, address, IVs[i]);
				address+=Block_Size;
				//printk("<0>" "IVs k: %d, address: %lx, page: %lx, %d\n",0,address, IVs[i], atomic_read(&IVs[i]->_count));
			}
			else
				goto up_ret;
		}
		
		mes->keys_start_address=address;
		for(i=0; i<count_pages; i++){
			if(likely(keys[i])){
				//int k=
				//get_page(keys[i]);
				vm_insert_page(vma, address, keys[i]);
				address+=Block_Size;
				//printk("<0>" "keys k: %d, address: %lx, page: %lx\n",k,address, keys[i]);
			}
			else
				goto up_ret;
		}
		
		mes->results_start_address=address;
		for(i=0; i<count; i++){
			if(likely(res[i])){
				//int k=
				//get_page(res[i]);
				vm_insert_page(vma, address, res[i]);
				address+=Block_Size;
				//printk("<0>" "res k: %d, address: %lx, page: %lx\n",k,address, res[i]);
			}
			else
				goto up_ret;
		}
		
		mes->pages_count=count;
		
		oks=kzalloc(sizeof(OCL_kernel_struct), GFP_KERNEL);
		atomic_set(&oks->waiters_returned, 0);
		
		oks->results=res;
		oks->IVs=IVs;
		oks->keys=keys;
		oks->results_count=count;
		oks->IVs_pages_count=count_pages;
		
		sema_init(&oks->sem, 0);
		sema_init(&oks->wanu_sem, 0);
		
		mes->kernel_struct=oks;
		mes->type=OCL_Get_Response;
		
		up(&gsb->gum_struct.gum_is_ready_sem);
	}
	else 
		goto up_ret;
	
	return oks;
	
up_ret:
	up(&gsb->gum_struct.gum_struct_sem);
	return 0;
}