static int mtk_mspace_mmap_physical(struct exm_info *info, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    void * va = NULL;
    unsigned long pa;
    int ret = -EINVAL;

    if ((vma->vm_flags & VM_SHARED) == 0) {
        return -EINVAL;
    }

    vma->vm_ops = &exm_vm_ops;
    va = extmem_malloc_page_align(size);

    if (!va) {
        printk("[EXT_MEM] %s[%d] malloc failed...", __FILE__, __LINE__);
        return -ENOMEM;
    }

    memset(va, 0, size);

    vma->vm_flags |= (VM_DONTCOPY | VM_DONTEXPAND);

    vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);

    pa = get_phys_from_mspace((unsigned long) va);
    ret = remap_pfn_range(vma,
    		       vma->vm_start,
    		       (pa >> PAGE_SHIFT),
    		       size,
    		       vma->vm_page_prot);
    extmem_printk("[EXT_MEM] pa:0x%x, va:0x%x, vma->vm_pgoff:0x%x, vm_start:0x%x, vm_end:0x%x\n", pa, va, vma->vm_pgoff, vma->vm_start, vma->vm_end);

    return ret;
}
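
For context, a helper with this signature is normally reached from the driver's file_operations .mmap entry point, which recovers the driver state from the file and hands the whole VMA over. The wiring below is only a minimal sketch of that: exm_mmap and the assumption that open() stored the exm_info in filp->private_data are illustrative, not code from this driver.

/* Hypothetical top-level mmap handler (not part of the excerpt): assumes
 * the driver's open() stored its exm_info in filp->private_data. */
static int exm_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct exm_info *info = filp->private_data;   /* assumption */

    if (!info)
        return -ENODEV;

    /* Delegate the physical mapping to the helper shown above. */
    return mtk_mspace_mmap_physical(info, vma);
}

static const struct file_operations exm_fops = {
    .owner = THIS_MODULE,
    .mmap  = exm_mmap,
    /* open/release handlers omitted */
};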
Example #2
static int exm_mmap_physical(struct vm_area_struct *vma)
{
	struct exm_device *idev = vma->vm_private_data;
	int mi = exm_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;

#ifdef CONFIG_ARM64
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#else
	vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
#endif
	return remap_pfn_range(vma,
			       vma->vm_start,
			       idev->info->mem[mi].addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
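
exm_find_mem_index() is not part of this excerpt. In the UIO drivers this pattern is borrowed from, the mmap offset selects the memory region: vm_pgoff is treated as an index into info->mem[], and only regions with a non-zero size may be mapped. The sketch below follows that convention; the MAX_EXM_MAPS bound and the mem[].size field are assumptions mirroring uio_find_mem_index(), not definitions taken from this driver.

/* Sketch only: assumes a UIO-style mem[] array with a size field and a
 * MAX_EXM_MAPS limit, mirroring uio_find_mem_index(). */
static int exm_find_mem_index(struct vm_area_struct *vma)
{
	struct exm_device *idev = vma->vm_private_data;

	if (vma->vm_pgoff < MAX_EXM_MAPS) {
		if (idev->info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}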
Example #3
static int mtk_mspace_mmap_physical(struct exm_info *info, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;
    void * va = NULL;
    unsigned long pa;
    int ret = -EINVAL;

    if ((vma->vm_flags & VM_SHARED) == 0) {
        return -EINVAL;
    }

    vma->vm_ops = &exm_vm_ops;
    va = extmem_malloc_page_align(size);

    if (!va) {
        printk(KERN_ERR "[EXT_MEM] %s failed...\n", __FUNCTION__);
        return -ENOMEM;
    }

    memset(va, 0, size);
    vma->vm_flags |= (VM_DONTCOPY | VM_DONTEXPAND);

#ifdef CONFIG_ARM64
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#else
	vma->vm_page_prot = __pgprot_modify(vma->vm_page_prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK);
#endif
    pa = get_phys_from_mspace((unsigned long) va);
    ret = remap_pfn_range(vma,
    		       vma->vm_start,
    		       (pa >> PAGE_SHIFT),
    		       size,
    		       vma->vm_page_prot);
	extmem_printk("[EXT_MEM] pa:%p, va:%p, vma->vm_pgoff:0x%lx, vm_start:0x%lx, vm_end:0x%lx\n", (void *)pa, va, vma->vm_pgoff, vma->vm_start, vma->vm_end);
	if (ret) {
		printk(KERN_ERR "[EXT_MEM] %s fail ret:%d\n", __FUNCTION__, ret);
	}

    return ret;
}
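
Both MTK variants install an exm_vm_ops table that the excerpt never defines. The placeholder below only illustrates the shape of such a table; whether the real driver uses its open/close hooks for reference counting, frees the extmem allocation on unmap, or adds a fault handler is not visible here, so the bodies are assumptions.

/* Placeholder sketch of the vm_operations_struct referenced above; the
 * real exm_vm_ops is not shown in this excerpt. */
static void exm_vma_open(struct vm_area_struct *vma)
{
    /* e.g. take a reference on the backing extmem region (assumption) */
}

static void exm_vma_close(struct vm_area_struct *vma)
{
    /* e.g. drop the reference and release the extmem allocation once the
     * last mapping goes away (assumption) */
}

static const struct vm_operations_struct exm_vm_ops = {
    .open  = exm_vma_open,
    .close = exm_vma_close,
};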
Example #4
int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	IMG_HANDLE hPMRResmanHandle;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(PVR_DRM_FILE_FROM_FILE(pFile));
#else
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif

#if defined(PVR_MMAP_USE_VM_INSERT)
	IMG_BOOL bMixedMap = IMG_FALSE;
#endif
	/*
	 * The PMR lock is used here to protect both handle-related operations
	 * and PMR operations. It was introduced to fix a lockdep issue.
	 */
	mutex_lock(&g_sMMapMutex);
	PMRLock();

#if defined(SUPPORT_DRM_DC_MODULE)
	psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
	if (!psPMR)
#endif
	{
		hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

		eError = PVRSRVLookupHandle(psConnection->psHandleBase,
					    (IMG_HANDLE *) &hPMRResmanHandle,
					    hSecurePMRHandle,
					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}

		eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
						    (void **)&psPMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}
	}

	/*
	 * Take a reference on the PMR; this makes sure that it can't be freed
	 * while it's mapped into the user process.
	 */
	PMRRefPMR(psPMR);

	PMRUnlock();

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e1;
	}

	/*
	 * We ought to call PMR_Flags() here to check the permissions
	 * against the requested mode, and possibly to set up the cache
	 * control protflags
	 */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard user read/write request, we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined (CONFIG_ARM64)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_ARM)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_X86)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
				sPageProt = pgprot_noncached(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
				sPageProt = pgprot_writecombine(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
				break;

		default:
				eError = PVRSRV_ERROR_INVALID_PARAMS;
				goto e1;
	}
	ps_vma->vm_page_prot = sPageProt;

    uiLength = ps_vma->vm_end - ps_vma->vm_start;

    ps_vma->vm_flags |= VM_IO;

/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
    ps_vma->vm_flags |= VM_DONTDUMP;
#else
    ps_vma->vm_flags |= VM_RESERVED;
#endif

    /*
     * Disable mremap because our nopage handler assumes all
     * page requests have already been validated.
     */
    ps_vma->vm_flags |= VM_DONTEXPAND;
    
    /* Don't allow mapping to be inherited across a process fork */
    ps_vma->vm_flags |= VM_DONTCOPY;

#if defined(PVR_MMAP_USE_VM_INSERT)
	{
		/* Scan the map range for pfns without struct page* handling. If we find
		 * one, this is a mixed map, and we can't use vm_insert_page().
		 */
		for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
		{
			IMG_CPU_PHYADDR sCpuPAddr;
			IMG_BOOL bValid;

			eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
			PVR_ASSERT(eError == PVRSRV_OK);
			if (eError)
			{
				goto e2;
			}

			if (bValid)
			{
				uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
				{
					bMixedMap = IMG_TRUE;
				}
			}
		}

		if (bMixedMap)
		{
		    ps_vma->vm_flags |= VM_MIXEDMAP;
		}
	}
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

    for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
    {
        IMG_SIZE_T uiNumContiguousBytes;
        IMG_INT32 iStatus;
        IMG_CPU_PHYADDR sCpuPAddr;
        IMG_BOOL bValid;

        uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
        eError = PMR_CpuPhysAddr(psPMR,
                                 uiOffset,
                                 &sCpuPAddr,
                                 &bValid);
        PVR_ASSERT(eError == PVRSRV_OK);
        if (eError)
        {
            goto e2;
        }

		/*
			Only map in pages that are valid; any that aren't will be picked up
			by the nopage handler, which will return a zeroed page for us.
		*/
		if (bValid)
		{
	        uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
	        PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

#if defined(PVR_MMAP_USE_VM_INSERT)
			if (bMixedMap)
			{
				/* This path is just for debugging. It should be equivalent
				 * to the remap_pfn_range() path.
				 */
				iStatus = vm_insert_mixed(ps_vma,
										  ps_vma->vm_start + uiOffset,
										  uiPFN);
			}
			else
			{
				iStatus = vm_insert_page(ps_vma,
										 ps_vma->vm_start + uiOffset,
										 pfn_to_page(uiPFN));
			}
#else /* defined(PVR_MMAP_USE_VM_INSERT) */
	        iStatus = remap_pfn_range(ps_vma,
	                                  ps_vma->vm_start + uiOffset,
	                                  uiPFN,
	                                  uiNumContiguousBytes,
	                                  ps_vma->vm_page_prot);
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

	        PVR_ASSERT(iStatus == 0);
	        if(iStatus)
	        {
	            // N.B. not the right error code, but it doesn't get propagated anyway... :(
	            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	
	            goto e2;
	        }

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
    /* USER MAPPING*/
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
	    PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
    	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
    			 	 	 	 	 (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
								 sCpuPAddr,
								 PAGE_SIZE,
								 IMG_NULL);
#endif
#endif
		}
        (void)pFile;
    }

    /* let us see the PMR so we can unlock it later */
    ps_vma->vm_private_data = psPMR;

    /* Install open and close handlers for ref-counting */
    ps_vma->vm_ops = &gsMMapOps;

	mutex_unlock(&g_sMMapMutex);

    return 0;

    /*
      error exit paths follow
    */
 e2:
    PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
    PMRUnlockSysPhysAddresses(psPMR);
 e1:
	PMRUnrefPMR(psPMR);
	goto em1;
 e0:
    PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
	PMRUnlock();
 em1:
    PVR_ASSERT(eError != PVRSRV_OK);
    PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	mutex_unlock(&g_sMMapMutex);

    return -ENOENT; // -EAGAIN // or what?
}
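
The gsMMapOps table installed at the end is what eventually balances the references taken here: MMapPMR() calls PMRRefPMR() and PMRLockSysPhysAddresses(), and the close handler must undo both once the mapping is torn down. The sketch below only illustrates that unwind; the real handler names are not shown in this excerpt, and a production table would also need a matching .open handler so that VMA duplication or splitting keeps the reference counts balanced.

/* Illustrative sketch only: the handler name is assumed. It undoes the
 * PMRLockSysPhysAddresses() and PMRRefPMR() calls made in MMapPMR(),
 * using the PMR stashed in vm_private_data. */
static void MMapVClose(struct vm_area_struct *ps_vma)
{
	PMR *psPMR = ps_vma->vm_private_data;

	PMRUnlockSysPhysAddresses(psPMR);
	PMRUnrefPMR(psPMR);
	ps_vma->vm_private_data = NULL;
}

static const struct vm_operations_struct gsMMapOps = {
	.close = MMapVClose,
	/* a matching .open handler (omitted here) would re-reference the PMR
	 * when the kernel duplicates or splits the VMA */
};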
Example #5
int MMapPMR(struct file* pFile, struct vm_area_struct* ps_vma)
{
    PVRSRV_ERROR eError;
    IMG_HANDLE hSecurePMRHandle;
    IMG_SIZE_T uiLength;
    IMG_DEVMEM_OFFSET_T uiOffset;
    unsigned long uiPFN;
    IMG_HANDLE hPMRResmanHandle;
    PMR *psPMR;
    PMR_FLAGS_T ulPMRFlags;
    IMG_UINT32 ui32CPUCacheFlags;
    unsigned long ulNewFlags = 0;
    pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
    // INTEL_TEMP
    // SINCE PVR_DRM_FILE_FROM_FILE is NOT found
    CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile->private_data);

    // INTEL_TEMP
    // SINCE PVR_DRM_FILE_FROM_FILE is NOT found
	//if (ps_vma->vm_pgoff > INT_MAX)
	//{
	//	ps_vma->vm_pgoff -= ((unsigned int)INT_MAX + 1);

	//	return MMapGEM(pFile, ps_vma);
	//}
#else
    CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif
	/*
	 * Both PVRSRVLookupHandle and ResManFindPrivateDataByPtr
	 * require the bridge mutex to be held for thread safety.
	 */
	LinuxLockMutex(&gPVRSRVLock);
	LinuxLockMutex(&g_sMMapMutex);

	hSecurePMRHandle=(IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
                                (IMG_HANDLE *) &hPMRResmanHandle,
                                hSecurePMRHandle,
                                PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	if (eError != PVRSRV_OK)
	{
        goto e0;
	}

    eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
                                        (IMG_VOID **)&psPMR);
	if (eError != PVRSRV_OK)
	{
        goto e0;
	}

	/*
		Take a reference on the PMR; this makes sure that it can't be freed
		while it's mapped into the user process.
	*/
	PMRRefPMR(psPMR);

	LinuxUnLockMutex(&gPVRSRVLock);

    eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
        goto e1;
	}

    if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	((ps_vma->vm_flags & VM_SHARED) == 0))
    {
	eError = PVRSRV_ERROR_INVALID_PARAMS;
	goto e1;
    }

    /*
      we ought to call PMR_Flags() here to check the permissions
      against the requested mode, and possibly to set up the cache
      control protflags
    */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
        goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard user read/write request, we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined(__arm__)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(__i386__) || defined(__x86_64)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot,
							   vm_get_page_prot(ulNewFlags));
#elif defined(__metag__)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
				sPageProt = pgprot_noncached(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
				sPageProt = pgprot_writecombine(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
				break;

		default:
				eError = PVRSRV_ERROR_INVALID_PARAMS;
				goto e1;
	}
	ps_vma->vm_page_prot = sPageProt;

    uiLength = ps_vma->vm_end - ps_vma->vm_start;

    for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
    {
        IMG_SIZE_T uiNumContiguousBytes;
        IMG_INT32 iStatus;
        IMG_CPU_PHYADDR sCpuPAddr;
        IMG_BOOL bValid;
	struct page *psPage = NULL;

        uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
        eError = PMR_CpuPhysAddr(psPMR,
                                 uiOffset,
                                 &sCpuPAddr,
                                 &bValid);
        PVR_ASSERT(eError == PVRSRV_OK);
        if (eError)
        {
            goto e2;
        }

		/*
			Only map in pages that are valid; any that aren't will be picked up
			by the nopage handler, which will return a zeroed page for us.
		*/
		if (bValid)
		{
	        uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
	        PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

		PVR_ASSERT(pfn_valid(uiPFN));
		psPage = pfn_to_page(uiPFN);
		iStatus = vm_insert_page(ps_vma,
				ps_vma->vm_start + uiOffset,
				psPage);

	        PVR_ASSERT(iStatus == 0);
	        if(iStatus)
	        {
	            // N.B. not the right error code, but it doesn't get propagated anyway... :(
	            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	
	            goto e2;
	        }
		}
        (void)pFile;
    }

    ps_vma->vm_flags |= VM_IO;

/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
    ps_vma->vm_flags |= VM_DONTDUMP;
#else
    ps_vma->vm_flags |= VM_RESERVED;
#endif

    /*
     * Disable mremap because our nopage handler assumes all
     * page requests have already been validated.
     */
    ps_vma->vm_flags |= VM_DONTEXPAND;
    
    /* Don't allow mapping to be inherited across a process fork */
    ps_vma->vm_flags |= VM_DONTCOPY;

    /* let us see the PMR so we can unlock it later */
    ps_vma->vm_private_data = psPMR;

    /* Install open and close handlers for ref-counting */
    ps_vma->vm_ops = &gsMMapOps;

	LinuxUnLockMutex(&g_sMMapMutex);

    return 0;

    /*
      error exit paths follow
    */
 e2:
    PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
    PMRUnlockSysPhysAddresses(psPMR);
 e1:
	PMRUnrefPMR(psPMR);
	goto em1;
 e0:
	LinuxUnLockMutex(&gPVRSRVLock);
 em1:
    PVR_ASSERT(eError != PVRSRV_OK);
    PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	LinuxUnLockMutex(&g_sMMapMutex);

    return -ENOENT; // -EAGAIN // or what?
}