PVRSRV_ERROR RGXSLCCacheInvalidateRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
                                          PMR *psPmr)
{
	RGXFWIF_KCCB_CMD sFlushInvalCmd;
	IMG_UINT32 ulPMRFlags;
	IMG_UINT32 ui32DeviceCacheFlags;
	PVRSRV_ERROR eError = PVRSRV_OK;

	PVR_ASSERT(psDeviceNode);

	/* In DEINIT state we stop scheduling SLC flush commands, because we don't
	 * know what state the firmware is in. In any case, once we are in DEINIT
	 * state we no longer care about FW memory consistency.
	 */
	if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
	{
		/* Get the PMR's caching flags */
		eError = PMR_Flags(psPmr, &ulPMRFlags);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_WARNING,
			         "RGXSLCCacheInvalidateRequest: Unable to get the caching attributes of PMR %p",
			         psPmr));
		}

		ui32DeviceCacheFlags = DevmemDeviceCacheMode(ulPMRFlags);

		/* Schedule an SLC flush and invalidate if:
		 * - the memory is GPU cached, or
		 * - we couldn't get the caching attributes (as a precaution).
		 */
		if ((ui32DeviceCacheFlags == PVRSRV_MEMALLOCFLAG_GPU_CACHED) || (eError != PVRSRV_OK))
		{
			/* Schedule the SLC flush command ... */
#if defined(PDUMP)
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
			                      "Submit SLC flush and invalidate");
#endif
			sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;

			eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
			                                   RGXFWIF_DM_GP,
			                                   &sFlushInvalCmd,
			                                   sizeof(sFlushInvalCmd),
			                                   IMG_TRUE);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,
				         "RGXSLCCacheInvalidateRequest: Failed to schedule SLC flush command with error (%u)",
				         eError));
			}
			else
			{
				/* Wait for the SLC flush to complete */
				eError = RGXWaitForFWOp(psDeviceNode->pvDevice,
				                        RGXFWIF_DM_GP,
				                        psDeviceNode->psSyncPrim,
				                        IMG_TRUE);
				if (eError != PVRSRV_OK)
				{
					PVR_DPF((PVR_DBG_ERROR,
					         "RGXSLCCacheInvalidateRequest: SLC flush and invalidate aborted with error (%u)",
					         eError));
				}
			}
		}
	}

	return eError;
}
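/*
 * Illustrative sketch (not part of the original source): the same
 * "submit a command on the GP data master, then wait for the firmware to
 * acknowledge it" pattern used by RGXSLCCacheInvalidateRequest() above,
 * factored into a helper. It relies only on calls already used above
 * (RGXSendCommandWithPowLock, RGXWaitForFWOp); the helper name itself is
 * hypothetical and assumes the usual rgx device headers are in scope.
 */
static PVRSRV_ERROR _RGXKickGPCommandAndWait(PVRSRV_DEVICE_NODE *psDeviceNode,
                                             RGXFWIF_KCCB_CMD *psCmd)
{
	PVRSRV_ERROR eError;

	/* Submit the command to the firmware's kernel CCB for the GP DM */
	eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
	                                   RGXFWIF_DM_GP,
	                                   psCmd,
	                                   sizeof(*psCmd),
	                                   IMG_TRUE);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Block until the firmware signals completion on the device sync prim */
	return RGXWaitForFWOp(psDeviceNode->pvDevice,
	                      RGXFWIF_DM_GP,
	                      psDeviceNode->psSyncPrim,
	                      IMG_TRUE);
}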
int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	IMG_HANDLE hPMRResmanHandle;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(PVR_DRM_FILE_FROM_FILE(pFile));
#else
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif
#if defined(PVR_MMAP_USE_VM_INSERT)
	IMG_BOOL bMixedMap = IMG_FALSE;
#endif

	/*
	 * The PMR lock is used here to protect both the handle-related operations
	 * and the PMR operations. It was introduced to fix a lockdep issue.
	 */
	mutex_lock(&g_sMMapMutex);
	PMRLock();

#if defined(SUPPORT_DRM_DC_MODULE)
	psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
	if (!psPMR)
#endif
	{
		hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

		eError = PVRSRVLookupHandle(psConnection->psHandleBase,
		                            (IMG_HANDLE *) &hPMRResmanHandle,
		                            hSecurePMRHandle,
		                            PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}

		eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
		                                    (void **)&psPMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}
	}

	/*
	 * Take a reference on the PMR; this makes sure that it can't be freed
	 * while it is mapped into the user process.
	 */
	PMRRefPMR(psPMR);

	PMRUnlock();

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e1;
	}

	/*
	 * We ought to call PMR_Flags() here to check the permissions
	 * against the requested mode, and possibly to set up the cache
	 * control protflags.
	 */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard the user read/write request; we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined(CONFIG_ARM64)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_ARM)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_X86)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif

	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
			sPageProt = pgprot_noncached(sPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
			sPageProt = pgprot_writecombine(sPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
			break;

		default:
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto e1;
	}

	ps_vma->vm_page_prot = sPageProt;

	uiLength = ps_vma->vm_end - ps_vma->vm_start;

	ps_vma->vm_flags |= VM_IO;

	/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	ps_vma->vm_flags |= VM_DONTDUMP;
#else
	ps_vma->vm_flags |= VM_RESERVED;
#endif

	/*
	 * Disable mremap because our nopage handler assumes all
	 * page requests have already been validated.
	 */
	ps_vma->vm_flags |= VM_DONTEXPAND;

	/* Don't allow the mapping to be inherited across a process fork */
	ps_vma->vm_flags |= VM_DONTCOPY;

#if defined(PVR_MMAP_USE_VM_INSERT)
	{
		/* Scan the map range for pfns without struct page* handling. If we
		 * find one, this is a mixed map, and we can't use vm_insert_page().
		 */
		for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
		{
			IMG_CPU_PHYADDR sCpuPAddr;
			IMG_BOOL bValid;

			eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
			PVR_ASSERT(eError == PVRSRV_OK);
			if (eError)
			{
				goto e2;
			}

			if (bValid)
			{
				uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
				{
					bMixedMap = IMG_TRUE;
				}
			}
		}

		if (bMixedMap)
		{
			ps_vma->vm_flags |= VM_MIXEDMAP;
		}
	}
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

	for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
	{
		IMG_SIZE_T uiNumContiguousBytes;
		IMG_INT32 iStatus;
		IMG_CPU_PHYADDR sCpuPAddr;
		IMG_BOOL bValid;

		uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
		eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
		PVR_ASSERT(eError == PVRSRV_OK);
		if (eError)
		{
			goto e2;
		}

		/*
		 * Only map in pages that are valid; any that aren't will be picked up
		 * by the nopage handler, which will return a zeroed page for us.
		 */
		if (bValid)
		{
			uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
			PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

#if defined(PVR_MMAP_USE_VM_INSERT)
			if (bMixedMap)
			{
				/* This path is just for debugging. It should be equivalent
				 * to the remap_pfn_range() path.
				 */
				iStatus = vm_insert_mixed(ps_vma,
				                          ps_vma->vm_start + uiOffset,
				                          uiPFN);
			}
			else
			{
				iStatus = vm_insert_page(ps_vma,
				                         ps_vma->vm_start + uiOffset,
				                         pfn_to_page(uiPFN));
			}
#else /* defined(PVR_MMAP_USE_VM_INSERT) */
			iStatus = remap_pfn_range(ps_vma,
			                          ps_vma->vm_start + uiOffset,
			                          uiPFN,
			                          uiNumContiguousBytes,
			                          ps_vma->vm_page_prot);
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

			PVR_ASSERT(iStatus == 0);
			if (iStatus)
			{
				// N.B. not the right error code, but it doesn't get propagated anyway... :(
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto e2;
			}

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
			/* USER MAPPING */
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
			PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
			PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
			                             (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
			                             sCpuPAddr,
			                             PAGE_SIZE,
			                             IMG_NULL);
#endif
#endif
		}
		(void)pFile;
	}

	/* Remember the PMR so we can unlock it later */
	ps_vma->vm_private_data = psPMR;

	/* Install open and close handlers for ref-counting */
	ps_vma->vm_ops = &gsMMapOps;

	mutex_unlock(&g_sMMapMutex);

	return 0;

	/* error exit paths follow */

e2:
	PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error. Abort!"));
	PMRUnlockSysPhysAddresses(psPMR);
e1:
	PMRUnrefPMR(psPMR);
	goto em1;
e0:
	PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
	PMRUnlock();
em1:
	PVR_ASSERT(eError != PVRSRV_OK);
	PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	mutex_unlock(&g_sMMapMutex);

	return -ENOENT; // -EAGAIN // or what?
}
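/*
 * Illustrative sketch (not in the original source): the CPU cache-mode to
 * page-protection translation performed inside MMapPMR() above, factored
 * into a helper. The helper name is hypothetical; it uses only the
 * PVRSRV_MEMALLOCFLAG_* values, DevmemCPUCacheMode() and the kernel
 * pgprot helpers already referenced by the function.
 */
static PVRSRV_ERROR _MMapApplyCPUCacheMode(PMR_FLAGS_T ulPMRFlags,
                                           pgprot_t *psPageProt)
{
	switch (DevmemCPUCacheMode(ulPMRFlags))
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
			/* Fully uncached, device-style mapping */
			*psPageProt = pgprot_noncached(*psPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
			/* Uncached but write-combining, for streaming writes */
			*psPageProt = pgprot_writecombine(*psPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
			/* Leave the normal cached protection untouched */
			break;

		default:
			return PVRSRV_ERROR_INVALID_PARAMS;
	}

	return PVRSRV_OK;
}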
int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	IMG_HANDLE hPMRResmanHandle;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;

#if defined(SUPPORT_DRM)
	/* INTEL_TEMP: since PVR_DRM_FILE_FROM_FILE is not found */
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile->private_data);
	/* INTEL_TEMP: since PVR_DRM_FILE_FROM_FILE is not found */
	//if (ps_vma->vm_pgoff > INT_MAX)
	//{
	//	ps_vma->vm_pgoff -= ((unsigned int)INT_MAX + 1);
	//	return MMapGEM(pFile, ps_vma);
	//}
#else
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif

	/*
	 * Both PVRSRVLookupHandle and ResManFindPrivateDataByPtr
	 * require the bridge mutex to be held for thread safety.
	 */
	LinuxLockMutex(&gPVRSRVLock);
	LinuxLockMutex(&g_sMMapMutex);

	hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
	                            (IMG_HANDLE *) &hPMRResmanHandle,
	                            hSecurePMRHandle,
	                            PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	if (eError != PVRSRV_OK)
	{
		goto e0;
	}

	eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
	                                    (IMG_VOID **)&psPMR);
	if (eError != PVRSRV_OK)
	{
		goto e0;
	}

	/*
	 * Take a reference on the PMR; this makes sure that it can't be freed
	 * while it is mapped into the user process.
	 */
	PMRRefPMR(psPMR);

	LinuxUnLockMutex(&gPVRSRVLock);

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e1;
	}

	/*
	 * We ought to call PMR_Flags() here to check the permissions
	 * against the requested mode, and possibly to set up the cache
	 * control protflags.
	 */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard the user read/write request; we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined(__arm__)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(__i386__) || defined(__x86_64)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(__metag__)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif

	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
			sPageProt = pgprot_noncached(sPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
			sPageProt = pgprot_writecombine(sPageProt);
			break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
			break;

		default:
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto e1;
	}

	ps_vma->vm_page_prot = sPageProt;

	uiLength = ps_vma->vm_end - ps_vma->vm_start;

	for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
	{
		IMG_SIZE_T uiNumContiguousBytes;
		IMG_INT32 iStatus;
		IMG_CPU_PHYADDR sCpuPAddr;
		IMG_BOOL bValid;
		struct page *psPage = NULL;

		uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
		eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
		PVR_ASSERT(eError == PVRSRV_OK);
		if (eError)
		{
			goto e2;
		}

		/*
		 * Only map in pages that are valid; any that aren't will be picked up
		 * by the nopage handler, which will return a zeroed page for us.
		 */
		if (bValid)
		{
			uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
			PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);
			PVR_ASSERT(pfn_valid(uiPFN));

			psPage = pfn_to_page(uiPFN);
			iStatus = vm_insert_page(ps_vma,
			                         ps_vma->vm_start + uiOffset,
			                         psPage);

			PVR_ASSERT(iStatus == 0);
			if (iStatus)
			{
				// N.B. not the right error code, but it doesn't get propagated anyway... :(
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
				goto e2;
			}
		}
		(void)pFile;
	}

	ps_vma->vm_flags |= VM_IO;

	/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	ps_vma->vm_flags |= VM_DONTDUMP;
#else
	ps_vma->vm_flags |= VM_RESERVED;
#endif

	/*
	 * Disable mremap because our nopage handler assumes all
	 * page requests have already been validated.
	 */
	ps_vma->vm_flags |= VM_DONTEXPAND;

	/* Don't allow the mapping to be inherited across a process fork */
	ps_vma->vm_flags |= VM_DONTCOPY;

	/* Remember the PMR so we can unlock it later */
	ps_vma->vm_private_data = psPMR;

	/* Install open and close handlers for ref-counting */
	ps_vma->vm_ops = &gsMMapOps;

	LinuxUnLockMutex(&g_sMMapMutex);

	return 0;

	/* error exit paths follow */

e2:
	PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error. Abort!"));
	PMRUnlockSysPhysAddresses(psPMR);
e1:
	PMRUnrefPMR(psPMR);
	goto em1;
e0:
	LinuxUnLockMutex(&gPVRSRVLock);
em1:
	PVR_ASSERT(eError != PVRSRV_OK);
	PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	LinuxUnLockMutex(&g_sMMapMutex);

	return -ENOENT; // -EAGAIN // or what?
}
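/*
 * Illustrative sketch (not in the original source): one way the gsMMapOps
 * open/close handlers referenced by MMapPMR() could implement the
 * "ref-counting" mentioned in the comment above. The handler names, the
 * vm_operations_struct layout and the exact balancing policy are
 * assumptions; only PMRRefPMR(), PMRUnrefPMR(), PMRLockSysPhysAddresses()
 * and PMRUnlockSysPhysAddresses() are taken from the code above.
 */
static void MMapVOpen(struct vm_area_struct *ps_vma)
{
	PMR *psPMR = ps_vma->vm_private_data;

	/* A duplicated VMA (e.g. after a split) must hold its own resources so
	 * that every MMapVClose() call below stays paired. Error handling for the
	 * phys-address lock is omitted in this sketch.
	 */
	PMRRefPMR(psPMR);
	(void)PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
}

static void MMapVClose(struct vm_area_struct *ps_vma)
{
	PMR *psPMR = ps_vma->vm_private_data;

	/* Undo what MMapPMR() set up: release the system physical address lock,
	 * then drop the reference that kept the PMR alive while mapped.
	 */
	PMRUnlockSysPhysAddresses(psPMR);
	PMRUnrefPMR(psPMR);
}

static const struct vm_operations_struct gsMMapOps =
{
	.open  = &MMapVOpen,
	.close = &MMapVClose,
};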