/*!
******************************************************************************
 @Function	PVRSRVTimeTraceSyncObject

 @Description	Write a trace item that snapshots the state of a sync object
		(its UID, pending/complete op counters and the device virtual
		addresses of its completion words).

 @Input		ui32Group : Trace item's group ID
 @Input		ui32Token : Trace item's ui32Token ID
 @Input		psSync : Sync object
 @Input		ui8SyncOp : Sync object operation

 @Return	None
******************************************************************************/
IMG_VOID PVRSRVTimeTraceSyncObject(IMG_UINT32 ui32Group, IMG_UINT32 ui32Token, PVRSRV_KERNEL_SYNC_INFO *psSync, IMG_UINT8 ui8SyncOp)
{
	IMG_UINT32 *pui32TraceItem;
	IMG_UINT32 *ui32Ptr;
	IMG_UINT32 ui32Size = PVRSRV_TRACE_TYPE_SYNC_SIZE;

	/* All access to the shared trace buffer is serialised by this mutex */
	LinuxLockMutex(&g_sTTraceMutex);

	PVRSRVTimeTraceAllocItem(&pui32TraceItem, ui32Size);

	if (!pui32TraceItem)
	{
		/* No buffer for this process; drop the item */
		PVR_DPF((PVR_DBG_ERROR, "Can't find buffer\n"));
		LinuxUnLockMutex(&g_sTTraceMutex);
		return;
	}

	/* Write the standard item header; returns a pointer to the payload area */
	ui32Ptr = PVRSRVTimeTraceWriteHeader(pui32TraceItem, ui32Group,
				PVRSRV_TRACE_CLASS_SYNC, ui32Token, ui32Size,
				PVRSRV_TRACE_TYPE_SYNC, 1);

	/* Snapshot the sync object's counters and device addresses */
	ui32Ptr[PVRSRV_TRACE_SYNC_UID] = psSync->ui32UID;
	ui32Ptr[PVRSRV_TRACE_SYNC_WOP] = psSync->psSyncData->ui32WriteOpsPending;
	ui32Ptr[PVRSRV_TRACE_SYNC_WOC] = psSync->psSyncData->ui32WriteOpsComplete;
	ui32Ptr[PVRSRV_TRACE_SYNC_ROP] = psSync->psSyncData->ui32ReadOpsPending;
	ui32Ptr[PVRSRV_TRACE_SYNC_ROC] = psSync->psSyncData->ui32ReadOpsComplete;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO2P] = psSync->psSyncData->ui32ReadOps2Pending;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO2C] = psSync->psSyncData->ui32ReadOps2Complete;
	ui32Ptr[PVRSRV_TRACE_SYNC_WO_DEV_VADDR] = psSync->sWriteOpsCompleteDevVAddr.uiAddr;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO_DEV_VADDR] = psSync->sReadOpsCompleteDevVAddr.uiAddr;
	ui32Ptr[PVRSRV_TRACE_SYNC_RO2_DEV_VADDR] = psSync->sReadOps2CompleteDevVAddr.uiAddr;
	ui32Ptr[PVRSRV_TRACE_SYNC_OP] = ui8SyncOp;

	LinuxUnLockMutex(&g_sTTraceMutex);
}
/*!
******************************************************************************
 @Function	PVRSRVTimeTraceBufferDestroy

 @Description	Destroy a trace buffer.
		Note: We assume that this will only be called once per process.

 @Input		ui32PID : PID of the process whose buffer is being destroyed

 @Return	PVRSRV_OK on success, PVRSRV_ERROR_INVALID_PARAMS if no buffer
		is registered for the PID
******************************************************************************/
PVRSRV_ERROR PVRSRVTimeTraceBufferDestroy(IMG_UINT32 ui32PID)
{
#if !defined(TTRACE_KEEP_BUFFER_ON_EXIT)
	sTimeTraceBuffer *psBuffer;

#if defined(DUMP_TTRACE_BUFFERS_ON_EXIT)
	/* Dump all buffers before tearing this one down */
	PVRSRVDumpTimeTraceBuffers();
#endif
	LinuxLockMutex(&g_sTTraceMutex);
	psBuffer = (sTimeTraceBuffer *) HASH_Retrieve(g_psBufferTable, (IMG_UINTPTR_T) ui32PID);

	if (psBuffer)
	{
		/* Free header + data area in one go, then drop the hash entry */
		OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(sTimeTraceBuffer) + TIME_TRACE_BUFFER_SIZE,
				psBuffer, NULL);
		HASH_Remove(g_psBufferTable, (IMG_UINTPTR_T) ui32PID);
		LinuxUnLockMutex(&g_sTTraceMutex);
		return PVRSRV_OK;
	}

	PVR_DPF((PVR_DBG_ERROR, "PVRSRVTimeTraceBufferDestroy: Can't find trace buffer in hash table"));
	LinuxUnLockMutex(&g_sTTraceMutex);
	return PVRSRV_ERROR_INVALID_PARAMS;
#else
	/* In this configuration the buffer is deliberately kept on process exit */
	return PVRSRV_OK;
#endif
}
/*
 * Unprivileged DRM command handler.  The argument buffer is used both for
 * input (word 0 = command) and output (word 0 = result).  Currently the only
 * command reports whether services initialisation completed successfully.
 * Returns 0 on success or -EFAULT for a NULL argument / unknown command.
 */
DRI_DRM_STATIC IMG_INT PVRDRMUnprivCmd(struct drm_device *dev, void *arg, struct drm_file *pFile)
{
	IMG_INT iRet = 0;

	LinuxLockMutex(&gPVRSRVLock);

	if (arg != NULL)
	{
		IMG_UINT32 *pui32InOut = (IMG_UINT32 *)arg;

		switch (pui32InOut[0])
		{
			case PVR_DRM_UNPRIV_INIT_SUCCESFUL:
				/* Overwrite the command word with the boolean result */
				*pui32InOut = PVRSRVGetInitServerState(PVRSRV_INIT_SERVER_SUCCESSFUL) ? 1 : 0;
				break;

			default:
				iRet = -EFAULT;
		}
	}
	else
	{
		iRet = -EFAULT;
	}

	LinuxUnLockMutex(&gPVRSRVLock);

	return iRet;
}
/*
 * Dump every pending message in the debug circular buffer (oldest first,
 * starting at the current write offset) and clear each slot after printing
 * so repeated dumps do not emit the same message twice.
 */
IMG_EXPORT IMG_VOID PVRSRVDebugPrintfDumpCCB(void)
{
	int iEntry;

	LinuxLockMutex(&gsDebugCCBMutex);

	for (iEntry = 0; iEntry < PVRSRV_DEBUG_CCB_MAX; iEntry++)
	{
		PVRSRV_DEBUG_CCB *psSlot = &gsDebugCCB[(giOffset + iEntry) % PVRSRV_DEBUG_CCB_MAX];

		/* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
		if (psSlot->pszFile == IMG_NULL)
		{
			continue;
		}

		printk(KERN_ERR "%s:%d: (%ld.%ld,tid=%u) %s\n",
				psSlot->pszFile,
				psSlot->iLine,
				(long)psSlot->sTimeVal.tv_sec,
				(long)psSlot->sTimeVal.tv_usec,
				psSlot->ui32TID,
				psSlot->pcMesg);

		/* Mark the slot consumed so the next dump skips it */
		psSlot->pszFile = IMG_NULL;
	}

	LinuxUnLockMutex(&gsDebugCCBMutex);
}
/*
 * Dump the debug circular buffer to the kernel log, oldest entry first.
 * Slots that were never written (no file name recorded) are skipped;
 * entries are left in place after printing.
 */
IMG_EXPORT IMG_VOID PVRSRVDebugPrintfDumpCCB(void)
{
	int iIdx;

	LinuxLockMutex(&gsDebugCCBMutex);

	for (iIdx = 0; iIdx < PVRSRV_DEBUG_CCB_MAX; iIdx++)
	{
		PVRSRV_DEBUG_CCB *psSlot = &gsDebugCCB[(giOffset + iIdx) % PVRSRV_DEBUG_CCB_MAX];

		/* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
		if (psSlot->pszFile)
		{
			printk("%s:%d:\t[%5ld.%6ld] %s\n",
					psSlot->pszFile,
					psSlot->iLine,
					(long)psSlot->sTimeVal.tv_sec,
					(long)psSlot->sTimeVal.tv_usec,
					psSlot->pcMesg);
		}
	}

	LinuxUnLockMutex(&gsDebugCCBMutex);
}
/* Drop the global services bridge mutex; the DRM device argument is unused. */
void SYSPVRSuspendUnlock(struct drm_device *dev)
{
	PVR_UNREFERENCED_PARAMETER(dev);

	LinuxUnLockMutex(&gPVRSRVLock);
}
/*
 * proc read helper: render bridge-call statistics one "record" per call.
 * off == 0 prints the global totals and the column header; off == N (N >= 1)
 * prints dispatch-table entry N-1; off past the table returns END_OF_FILE.
 * Returning 0 asks the caller to retry with a bigger buffer (count too small).
 */
static off_t printLinuxBridgeStats(IMG_CHAR * buffer, size_t count, off_t off)
{
	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry;
	off_t Ret;

	/* The dispatch table and global counters are protected by the bridge lock */
	LinuxLockMutex(&gPVRSRVLock);

	if(!off)
	{
		if(count < 500)
		{
			Ret = 0;
			goto unlock_and_return;
		}

		/* Header: global totals followed by the per-entry column titles */
		Ret = printAppend(buffer, count, 0,
				"Total ioctl call count = %lu\n"
				"Total number of bytes copied via copy_from_user = %lu\n"
				"Total number of bytes copied via copy_to_user = %lu\n"
				"Total number of bytes copied via copy_*_user = %lu\n\n"
				"%-45s | %-40s | %10s | %20s | %10s\n",
				g_BridgeGlobalStats.ui32IOCTLCount,
				g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
				g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
				g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
				"Bridge Name",
				"Wrapper Function",
				"Call Count",
				"copy_from_user Bytes",
				"copy_to_user Bytes");
		goto unlock_and_return;
	}

	/* Past the last table entry: signal end of file */
	if(off > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
	{
		Ret = END_OF_FILE;
		goto unlock_and_return;
	}

	if(count < 300)
	{
		Ret = 0;
		goto unlock_and_return;
	}

	/* off is 1-based; entry 0 corresponds to off == 1 */
	psEntry = &g_BridgeDispatchTable[off-1];
	Ret = printAppend(buffer, count, 0,
			"%-45s %-40s %-10lu %-20lu %-10lu\n",
			psEntry->pszIOCName,
			psEntry->pszFunctionName,
			psEntry->ui32CallCount,
			psEntry->ui32CopyFromUserTotalBytes,
			psEntry->ui32CopyToUserTotalBytes);

unlock_and_return:
	LinuxUnLockMutex(&gPVRSRVLock);
	return Ret;
}
/*!
******************************************************************************
 @Function	PVRSRVTimeTraceBufferCreate

 @Description	Create a trace buffer for the given process.
		Note: We assume that this will only be called once per process.

 @Input		ui32PID : PID of the process that is creating the buffer

 @Return	PVRSRV_ERROR result propagated from the internal create helper
******************************************************************************/
PVRSRV_ERROR PVRSRVTimeTraceBufferCreate(IMG_UINT32 ui32PID)
{
	PVRSRV_ERROR eError;

	/* Creation must be serialised against all other trace-buffer access */
	LinuxLockMutex(&g_sTTraceMutex);
	eError = _PVRSRVTimeTraceBufferCreate(ui32PID);
	LinuxUnLockMutex(&g_sTTraceMutex);

	return eError;
}
/*!
******************************************************************************
 @Function	PVRSRVTimeTraceArray

 @Description	Write trace item with an array of data

 @Input		ui32Group : Trace item's group ID
 @Input		ui32Class : Trace item's class ID
 @Input		ui32Token : Trace item's ui32Token ID
 @Input		ui32Type : Trace item's data type (one of PVRSRV_TRACE_TYPE_UI8/16/32/64)
 @Input		ui32Count : Trace item's data count (number of elements)
 @Input		pui8Data : Pointer to data array

 @Return	None
******************************************************************************/
IMG_VOID PVRSRVTimeTraceArray(IMG_UINT32 ui32Group, IMG_UINT32 ui32Class, IMG_UINT32 ui32Token,
			IMG_UINT32 ui32Type, IMG_UINT32 ui32Count, IMG_UINT8 *pui8Data)
{
	IMG_UINT32 *pui32TraceItem;
	IMG_UINT32 ui32Size, ui32TypeSize;
	IMG_UINT8 *ui8Ptr;

	/* Only the 1st 4 sizes are for ui types, others are "special" */
	switch (ui32Type)
	{
		case PVRSRV_TRACE_TYPE_UI8:
			ui32TypeSize = 1;
			break;
		case PVRSRV_TRACE_TYPE_UI16:
			ui32TypeSize = 2;
			break;
		case PVRSRV_TRACE_TYPE_UI32:
			ui32TypeSize = 4;
			break;
		case PVRSRV_TRACE_TYPE_UI64:
			ui32TypeSize = 8;
			break;
		default:
			/* Unsupported element type: drop the item entirely */
			PVR_DPF((PVR_DBG_ERROR, "Unsupported size\n"));
			return;
	}

	/* Payload size in bytes */
	ui32Size = ui32TypeSize * ui32Count;

	/* Allocate space from the buffer */
	LinuxLockMutex(&g_sTTraceMutex);
	PVRSRVTimeTraceAllocItem(&pui32TraceItem, ui32Size);

	if (!pui32TraceItem)
	{
		PVR_DPF((PVR_DBG_ERROR, "Can't find buffer\n"));
		LinuxUnLockMutex(&g_sTTraceMutex);
		return;
	}

	ui8Ptr = PVRSRVTimeTraceWriteHeader(pui32TraceItem, ui32Group, ui32Class,
				ui32Token, ui32Size, ui32Type, ui32Count);

	/* Header writer returns NULL when there is no payload space */
	if (ui8Ptr)
	{
		OSMemCopy(ui8Ptr, pui8Data, ui32Size);
	}

	LinuxUnLockMutex(&g_sTTraceMutex);
}
/*
 * Resolve a PVR secure-export file descriptor to its underlying ION handles.
 * On success writes the two plane handles into handles[0..1] and, if
 * 'client' is non-NULL, the global ION client pointer.  On any failure
 * (bad fd, no private data, handle lookup failure, or a non-ION memory
 * area) the outputs are left untouched.
 */
void PVRSRVExportFDToIONHandles(int fd, struct ion_client **client, struct ion_handle *handles[2])
{
	PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
	LinuxMemArea *psLinuxMemArea;
	PVRSRV_ERROR eError;
	struct file *psFile;

	/* Take the bridge mutex so the handle won't be freed underneath us */
	LinuxLockMutex(&gPVRSRVLock);

	psFile = fget(fd);
	if(!psFile)
		goto err_unlock;

	psPrivateData = psFile->private_data;
	if(!psPrivateData)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: struct file* has no private_data; "
								"invalid export handle", __func__));
		goto err_fput;
	}

	/* Translate the export's kernel-services handle back to a MEM_INFO */
	eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
								(IMG_PVOID *)&psKernelMemInfo,
								psPrivateData->hKernelMemInfo,
								PVRSRV_HANDLE_TYPE_MEM_INFO);
	if(eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up MEM_INFO handle",
								__func__));
		goto err_fput;
	}

	psLinuxMemArea = (LinuxMemArea *)psKernelMemInfo->sMemBlk.hOSMemHandle;
	BUG_ON(psLinuxMemArea == IMG_NULL);

	/* Only ION-backed memory areas can be exported this way */
	if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ION)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Valid handle, but not an ION buffer",
								__func__));
		goto err_fput;
	}

	handles[0] = psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[0];
	handles[1] = psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[1];
	if(client)
		*client = gpsIONClient;

err_fput:
	fput(psFile);
err_unlock:
	/* Allow PVRSRV clients to communicate with srvkm again */
	LinuxUnLockMutex(&gPVRSRVLock);
}
/*
 * seq_file start/stop hook for the bridge statistics file: the bridge lock
 * is taken when the sequence starts and released when it stops, so the
 * whole traversal runs under the lock.
 */
static void ProcSeqStartstopBridgeStats(struct seq_file *sfile,IMG_BOOL start)
{
	if (!start)
	{
		LinuxUnLockMutex(&gPVRSRVLock);
		return;
	}

	LinuxLockMutex(&gPVRSRVLock);
}
/*
 * Release the debug buffer lock taken by the matching acquire helper:
 * either the IRQ-safe spinlock (restoring the saved flags) or the
 * non-IRQ mutex, depending on USE_SPIN_LOCK.
 */
static inline void ReleaseBufferLock(unsigned long ulLockFlags)
{
	if (!USE_SPIN_LOCK)
	{
		LinuxUnLockMutex(&gsDebugMutexNonIRQ);
	}
	else
	{
		spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
	}
}
/*
 * Forward a display-controller ioctl under the bridge lock, taken with an
 * explicit lock class (PVRSRV_LOCK_CLASS_BRIDGE) for lockdep annotation.
 */
static int PVRDRM_Display_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
{
	int iResult;

	LinuxLockMutexNested(&gPVRSRVLock, PVRSRV_LOCK_CLASS_BRIDGE);

	iResult = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(dev, arg, pFile);

	LinuxUnLockMutex(&gPVRSRVLock);

	return iResult;
}
/* Forward a display-controller ioctl while holding the global bridge lock. */
static IMG_INT PVRDRM_Display_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
{
	IMG_INT iResult;

	LinuxLockMutex(&gPVRSRVLock);

	iResult = PVR_DRM_MAKENAME(DISPLAY_CONTROLLER, _Ioctl)(dev, arg, pFile);

	LinuxUnLockMutex(&gPVRSRVLock);

	return iResult;
}
/*
 * File-release hook: disconnect the calling process from services under
 * the bridge lock.  Always reports success.
 */
static int PVRSRVRelease(struct inode unref__ * pInode, struct file unref__ * pFile)
{
	LinuxLockMutex(&gPVRSRVLock);

	PVRSRVProcessDisconnect(OSGetCurrentProcessIDKM());

	LinuxUnLockMutex(&gPVRSRVLock);

	return 0;
}
/*
 * File-open hook: connect the calling process to services under the bridge
 * lock.  Returns 0 on success or -ENOMEM if the connection fails.
 */
static int PVRSRVOpen(struct inode unref__ * pInode, struct file unref__ * pFile)
{
	int iStatus = 0;

	LinuxLockMutex(&gPVRSRVLock);

	if (PVRSRVProcessConnect(OSGetCurrentProcessIDKM()) != PVRSRV_OK)
	{
		iStatus = -ENOMEM;
	}

	LinuxUnLockMutex(&gPVRSRVLock);

	return iStatus;
}
/*
 * Release the debug buffer lock taken by the matching acquire helper.
 * When PVR_DEBUG_ALWAYS_USE_SPINLOCK is defined only the spinlock path is
 * compiled; otherwise the path is chosen at runtime via USE_SPIN_LOCK.
 */
static inline void ReleaseBufferLock(unsigned long ulLockFlags)
{
#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
	if (USE_SPIN_LOCK)
#endif
	{
		/* IRQ-safe path: restore the interrupt state saved at lock time */
		spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
	}
#if !defined(PVR_DEBUG_ALWAYS_USE_SPINLOCK)
	else
	{
		/* Non-IRQ context uses a mutex instead of the spinlock */
		LinuxUnLockMutex(&gsDebugMutexNonIRQ);
	}
#endif
}
/*
 * Append a debug message to the circular buffer: record the source location,
 * thread group id and timestamp, copy the message (bounded, always
 * NUL-terminated), then advance the write offset modulo the buffer size.
 */
static void AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
						   const IMG_CHAR *szBuffer)
{
	PVRSRV_DEBUG_CCB *psSlot;

	LinuxLockMutex(&gsDebugCCBMutex);

	psSlot = &gsDebugCCB[giOffset];

	psSlot->pszFile = pszFileName;
	psSlot->iLine = ui32Line;
	psSlot->ui32TID = current->tgid;
	do_gettimeofday(&psSlot->sTimeVal);

	/* strncpy does not guarantee termination, so force the final NUL */
	strncpy(psSlot->pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1);
	psSlot->pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0;

	/* Advance the circular write position */
	giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;

	LinuxUnLockMutex(&gsDebugCCBMutex);
}
/*!
******************************************************************************
 @Function	PVRSRVDumpTimeTraceBuffers

 @Description	Dump the contents of all the trace buffers by iterating the
		per-process buffer hash table under the trace mutex.

 @Return	None
******************************************************************************/
IMG_VOID PVRSRVDumpTimeTraceBuffers(IMG_VOID)
{
	LinuxLockMutex(&g_sTTraceMutex);

	/* Invoke the dump callback on every registered buffer */
	HASH_Iterate(g_psBufferTable, PVRSRVDumpTimeTraceBuffer);

	LinuxUnLockMutex(&g_sTTraceMutex);
}
/*
 * Resolve a PVR secure-export file descriptor to its underlying ION handles.
 * *num_handles supplies the caller's array capacity on entry; on success it
 * is set to the number of handles written.  If the capacity is too small,
 * the caller's array is cleared, *num_handles is set to the required count,
 * and -EINVAL is returned so the caller can retry.  Returns 0 on success,
 * -EINVAL on any failure.
 */
int PVRSRVExportFDToIONHandles(int fd, struct ion_client **client,
							   struct ion_handle **handles,
							   unsigned int *num_handles)
{
	PVRSRV_FILE_PRIVATE_DATA *psPrivateData;
	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
	LinuxMemArea *psLinuxMemArea;
	PVRSRV_ERROR eError;
	struct file *psFile;
	int i;
	unsigned int ui32NumHandles = *num_handles;
	int ret = -EINVAL;

	/* Take the bridge mutex so the handle won't be freed underneath us */
	LinuxLockMutex(&gPVRSRVLock);

	psFile = fget(fd);
	if(!psFile)
		goto err_unlock;

	psPrivateData = psFile->private_data;
	if(!psPrivateData)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: struct file* has no private_data; "
								"invalid export handle", __func__));
		goto err_fput;
	}

	/* Translate the export's kernel-services handle back to a MEM_INFO */
	eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
								(IMG_PVOID *)&psKernelMemInfo,
								psPrivateData->hKernelMemInfo,
								PVRSRV_HANDLE_TYPE_MEM_INFO);
	if(eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to look up MEM_INFO handle",
								__func__));
		goto err_fput;
	}

	psLinuxMemArea = (LinuxMemArea *)psKernelMemInfo->sMemBlk.hOSMemHandle;
	BUG_ON(psLinuxMemArea == IMG_NULL);

	/* Only ION-backed memory areas can be exported this way */
	if(psLinuxMemArea->eAreaType != LINUX_MEM_AREA_ION)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Valid handle, but not an ION buffer",
								__func__));
		goto err_fput;
	}

	/* Client is requesting fewer handles then we have */
	if(ui32NumHandles < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Client requested %u handles, but we have %u",
								__func__, ui32NumHandles,
								psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes));

		/* Clear client handles */
		for (i = 0; i < ui32NumHandles; i++)
			handles[i] = NULL;

		/* Return number of handles to client */
		*num_handles = psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes;

		goto err_fput;
	}

	/* Copy out every valid plane handle, capped at MAX_HANDLES_PER_FD */
	for (i = 0; (i < psLinuxMemArea->uData.sIONTilerAlloc.ui32NumValidPlanes) && (i < MAX_HANDLES_PER_FD); i++)
		handles[i] = psLinuxMemArea->uData.sIONTilerAlloc.psIONHandle[i];
	*num_handles = i;

	if(client)
		*client = gpsIONClient;

	ret = 0;

err_fput:
	fput(psFile);
err_unlock:
	/* Allow PVRSRV clients to communicate with srvkm again */
	LinuxUnLockMutex(&gPVRSRVLock);

	return ret;
}
/*
 * mmap handler for PMR-backed memory.  Looks up the PMR from the vm_pgoff
 * handle, takes a reference and locks its physical addresses, configures the
 * VMA flags/page protection from the PMR flags, then maps every valid page
 * into the VMA (invalid pages are left for the nopage handler).  Returns 0
 * on success, -ENOENT on any failure.
 *
 * Locking: both gPVRSRVLock and g_sMMapMutex are taken; gPVRSRVLock is
 * dropped as soon as the PMR reference is held, g_sMMapMutex is held to the
 * end.  The error paths release exactly the locks/references still held at
 * the point of failure.
 */
int MMapPMR(struct file* pFile, struct vm_area_struct* ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	IMG_HANDLE hPMRResmanHandle;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(PVR_DRM_FILE_FROM_FILE(pFile));

	/* Offsets above INT_MAX are GEM mappings; rebase and delegate */
	if (ps_vma->vm_pgoff > INT_MAX)
	{
		ps_vma->vm_pgoff -= ((unsigned int)INT_MAX + 1);

		return MMapGEM(pFile, ps_vma);
	}
#else
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif

	/*
	 * Both PVRSRVLookupHandle and ResManFindPrivateDataByPtr
	 * require the bridge mutex to be held for thread safety.
	 */
	LinuxLockMutex(&gPVRSRVLock);
	LinuxLockMutex(&g_sMMapMutex);

	/* The secure PMR handle is smuggled through the mmap offset */
	hSecurePMRHandle=(IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

	eError = PVRSRVLookupHandle(psConnection->psHandleBase,
								(IMG_HANDLE *) &hPMRResmanHandle,
								hSecurePMRHandle,
								PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
	if (eError != PVRSRV_OK)
	{
		goto e0;
	}

	eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
										(IMG_VOID **)&psPMR);
	if (eError != PVRSRV_OK)
	{
		goto e0;
	}

	/* Take a reference on the PMR, make's sure that it can't be freed
	   while it's mapped into the user process */
	PMRRefPMR(psPMR);

	/* The bridge lock is no longer needed once we hold our own reference */
	LinuxUnLockMutex(&gPVRSRVLock);

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	/* Writable private (copy-on-write) mappings are not supported */
	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
		((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e1;
	}

	/* we ought to call PMR_Flags() here to check the permissions
	   against the requested mode, and possibly to set up the cache
	   control protflags */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard user read/write request, we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

	/* Derive the base page protection in an architecture-specific way */
#if defined(__arm__)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(__i386__) || defined(__x86_64)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(__metag__) || defined(__mips__)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif

	/* Apply the PMR's CPU cache mode to the page protection */
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
				sPageProt = pgprot_noncached(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
				sPageProt = pgprot_writecombine(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
				break;

		default:
				eError = PVRSRV_ERROR_INVALID_PARAMS;
				goto e1;
	}
	ps_vma->vm_page_prot = sPageProt;

	uiLength = ps_vma->vm_end - ps_vma->vm_start;

	ps_vma->vm_flags |= VM_IO;

	/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	ps_vma->vm_flags |= VM_DONTDUMP;
#else
	ps_vma->vm_flags |= VM_RESERVED;
#endif

	/*
	 * Disable mremap because our nopage handler assumes all
	 * page requests have already been validated.
	 */
	ps_vma->vm_flags |= VM_DONTEXPAND;

	/* Don't allow mapping to be inherited across a process fork */
	ps_vma->vm_flags |= VM_DONTCOPY;

#if defined(PVR_MMAP_USE_VM_INSERT)
	{
		IMG_BOOL bMixedMap = IMG_FALSE;

		/* Scan the map range for pfns without struct page* handling. If we find
		 * one, this is a mixed map, and we can't use vm_insert_page().
		 */
		for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
		{
			IMG_CPU_PHYADDR sCpuPAddr;
			IMG_BOOL bValid;

			eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
			PVR_ASSERT(eError == PVRSRV_OK);
			if (eError)
			{
				goto e2;
			}

			if (bValid)
			{
				uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
				{
					bMixedMap = IMG_TRUE;
				}
			}
		}

		if (bMixedMap)
		{
			ps_vma->vm_flags |= VM_MIXEDMAP;
		}
	}
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

	/* Second pass: actually insert each valid page into the VMA */
	for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
	{
		IMG_SIZE_T uiNumContiguousBytes;
		IMG_INT32 iStatus;
		IMG_CPU_PHYADDR sCpuPAddr;
		IMG_BOOL bValid;

		uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
		eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
		PVR_ASSERT(eError == PVRSRV_OK);
		if (eError)
		{
			goto e2;
		}

		/* Only map in pages that are valid, any that aren't will be
		   picked up by the nopage handler which will return a zeroed
		   page for us */
		if (bValid)
		{
			uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
			PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

#if defined(PVR_MMAP_USE_VM_INSERT)
			if (ps_vma->vm_flags & VM_MIXEDMAP)
			{
				/* This path is just for debugging. It should be
				 * equivalent to the remap_pfn_range() path.
				 */
				iStatus = vm_insert_mixed(ps_vma,
										  ps_vma->vm_start + uiOffset,
										  uiPFN);
			}
			else
			{
				iStatus = vm_insert_page(ps_vma,
										 ps_vma->vm_start + uiOffset,
										 pfn_to_page(uiPFN));
			}
#else /* defined(PVR_MMAP_USE_VM_INSERT) */
			iStatus = remap_pfn_range(ps_vma,
									  ps_vma->vm_start + uiOffset,
									  uiPFN,
									  uiNumContiguousBytes,
									  ps_vma->vm_page_prot);
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

			PVR_ASSERT(iStatus == 0);
			if(iStatus)
			{
				// N.B. not the right error code, but, it doesn't get propagated anyway... :(
				eError = PVRSRV_ERROR_OUT_OF_MEMORY;

				goto e2;
			}
#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
	/* USER MAPPING*/
#if defined(PVRSRV_MEMORY_STATS_LITE)
	PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
								 (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
								 sCpuPAddr,
								 PAGE_SIZE,
								 IMG_NULL);
#endif
#endif
		}
		(void)pFile;
	}

	/* let us see the PMR so we can unlock it later */
	ps_vma->vm_private_data = psPMR;

	/* Install open and close handlers for ref-counting */
	ps_vma->vm_ops = &gsMMapOps;

	LinuxUnLockMutex(&g_sMMapMutex);

	return 0;

	/* error exit paths follow */

e2:
	PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
	PMRUnlockSysPhysAddresses(psPMR);
e1:
	PMRUnrefPMR(psPMR);
	goto em1;
e0:
	/* gPVRSRVLock is only still held on the pre-reference error paths */
	LinuxUnLockMutex(&gPVRSRVLock);
em1:
	PVR_ASSERT(eError != PVRSRV_OK);
	PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	LinuxUnLockMutex(&g_sMMapMutex);

	return -ENOENT; // -EAGAIN // or what?
}
IMG_INT32 PVRSRV_BridgeDispatchKM(struct file *pFile, IMG_UINT unref__ ioctlCmd, IMG_UINT32 arg) #endif { IMG_UINT32 cmd; #if !defined(SUPPORT_DRI_DRM) PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg; PVRSRV_BRIDGE_PACKAGE sBridgePackageKM; #endif PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM; IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM(); PVRSRV_PER_PROCESS_DATA *psPerProc; IMG_INT err = -EFAULT; LinuxLockMutex(&gPVRSRVLock); #if defined(SUPPORT_DRI_DRM) PVR_UNREFERENCED_PARAMETER(dev); psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg; PVR_ASSERT(psBridgePackageKM != IMG_NULL); #else PVR_UNREFERENCED_PARAMETER(ioctlCmd); psBridgePackageKM = &sBridgePackageKM; if(!OSAccessOK(PVR_VERIFY_WRITE, psBridgePackageUM, sizeof(PVRSRV_BRIDGE_PACKAGE))) { PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments", __FUNCTION__)); goto unlock_and_return; } if(OSCopyFromUser(IMG_NULL, psBridgePackageKM, psBridgePackageUM, sizeof(PVRSRV_BRIDGE_PACKAGE)) != PVRSRV_OK) { goto unlock_and_return; } #endif cmd = psBridgePackageKM->ui32BridgeID; #if defined(MODULE_TEST) switch (cmd) { case PVRSRV_BRIDGE_SERVICES_TEST_MEM1: { PVRSRV_ERROR eError = MemTest1(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_MEM2: { PVRSRV_ERROR eError = MemTest2(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE: { PVRSRV_ERROR eError = ResourceTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } 
err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT: { PVRSRV_ERROR eError = EventObjectTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING: { PVRSRV_ERROR eError = MemMappingTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID: { PVRSRV_ERROR eError = ProcessIDTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS: { PVRSRV_ERROR eError = ClockusWaitusTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_TIMER: { PVRSRV_ERROR eError = TimerTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV: { PVRSRV_ERROR eError = PrivSrvTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA: { IMG_UINT32 ui32PID; PVRSRV_PER_PROCESS_DATA 
*psPerProc; PVRSRV_ERROR eError; ui32PID = OSGetCurrentProcessIDKM(); PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d", ui32PID); psPerProc = PVRSRVPerProcessData(ui32PID); eError = CopyDataTest(psBridgePackageKM->pvParamIn, psBridgePackageKM->pvParamOut, psPerProc); *(PVRSRV_ERROR*)psBridgePackageKM->pvParamOut = eError; err = 0; goto unlock_and_return; } case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT: { PVRSRV_ERROR eError = PowerMgmtTest(); if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN)) { PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ; pReturn->eError = eError; } } err = 0; goto unlock_and_return; } #endif if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES) { PVRSRV_ERROR eError; eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE, (IMG_PVOID *)&psPerProc, psBridgePackageKM->hKernelServices, PVRSRV_HANDLE_TYPE_PERPROC_DATA); if(eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)", __FUNCTION__, eError)); goto unlock_and_return; } if(psPerProc->ui32PID != ui32PID) { PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data " "belonging to process %d", __FUNCTION__, ui32PID, psPerProc->ui32PID)); goto unlock_and_return; } } else { psPerProc = PVRSRVPerProcessData(ui32PID); if(psPerProc == IMG_NULL) { PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: " "Couldn't create per-process data area")); goto unlock_and_return; } } psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID); #if defined(PVR_SECURE_FD_EXPORT) switch(cmd) { case PVRSRV_BRIDGE_EXPORT_DEVICEMEM: { PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); if(psPrivateData->hKernelMemInfo) { PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo " "per file descriptor", __FUNCTION__)); err = -EINVAL; goto unlock_and_return; } break; } case PVRSRV_BRIDGE_MAP_DEV_MEMORY: { PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN = (PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY 
*)psBridgePackageKM->pvParamIn; PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); if(!psPrivateData->hKernelMemInfo) { PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no " "associated MemInfo handle", __FUNCTION__)); err = -EINVAL; goto unlock_and_return; } psMapDevMemIN->hKernelMemInfo = psPrivateData->hKernelMemInfo; break; } default: { PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); if(psPrivateData->hKernelMemInfo) { PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried " "to use privileged service", __FUNCTION__)); goto unlock_and_return; } break; } } #endif err = BridgedDispatchKM(psPerProc, psBridgePackageKM); if(err != PVRSRV_OK) goto unlock_and_return; switch(cmd) { #if defined(PVR_SECURE_FD_EXPORT) case PVRSRV_BRIDGE_EXPORT_DEVICEMEM: { PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT = (PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut; PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo; #if defined(SUPPORT_MEMINFO_IDS) psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp = ++ui64Stamp; #endif break; } #endif #if defined(SUPPORT_MEMINFO_IDS) case PVRSRV_BRIDGE_MAP_DEV_MEMORY: { PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT = (PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut; PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile); psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp = psPrivateData->ui64Stamp; break; } case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY: { PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT = (PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut; psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp = ++ui64Stamp; break; } #endif default: break; } unlock_and_return: LinuxUnLockMutex(&gPVRSRVLock); return err; }
/*
 * proc read helper: render the Linux memory area records one per call.
 * off == 0 prints the summary header; off == N (N >= 1) walks the record
 * list to the N-th record and prints it; walking off the end returns
 * END_OF_FILE.  Returning 0 asks the caller to retry with a larger buffer.
 */
static off_t printLinuxMemAreaRecords(char *buffer, size_t count, off_t off)
{
	struct DEBUG_LINUX_MEM_AREA_REC *psRecord;
	off_t Ret;

	/* Record list and water marks are protected by the bridge lock */
	LinuxLockMutex(&gPVRSRVLock);

	if (!off)
	{
		if (count < 500)
		{
			Ret = 0;
			goto unlock_and_return;
		}

		/* Header: counts, water marks and the per-record column titles */
		Ret = printAppend(buffer, count, 0,
				  "Number of Linux Memory Areas: %u\n"
				  "At the current water mark these areas "
				  "correspond to %u bytes (excluding SUB areas)\n"
				  "At the highest water mark these areas "
				  "corresponded to %u bytes (excluding SUB areas)\n"
				  "\nDetails for all Linux Memory Areas:\n"
				  "%s %-24s %s %s %-8s %-5s %s\n",
				  g_LinuxMemAreaCount,
				  g_LinuxMemAreaWaterMark,
				  g_LinuxMemAreaHighWaterMark,
				  "psLinuxMemArea",
				  "LinuxMemType",
				  "CpuVAddr",
				  "CpuPAddr",
				  "Bytes",
				  "Pid",
				  "Flags");
		goto unlock_and_return;
	}

	/* Advance to the off-th record (off is 1-based) */
	for (psRecord = g_LinuxMemAreaRecords; --off && psRecord;
	     psRecord = psRecord->psNext)
		;
	if (!psRecord)
	{
		Ret = END_OF_FILE;
		goto unlock_and_return;
	}

	if (count < 500)
	{
		Ret = 0;
		goto unlock_and_return;
	}

	Ret = printAppend(buffer, count, 0,
			  "%8p %-24s %8p %08x %-8d %-5u %08x=(%s)\n",
			  psRecord->psLinuxMemArea,
			  LinuxMemAreaTypeToString(psRecord->psLinuxMemArea->eAreaType),
			  LinuxMemAreaToCpuVAddr(psRecord->psLinuxMemArea),
			  LinuxMemAreaToCpuPAddr(psRecord->psLinuxMemArea, 0).uiAddr,
			  psRecord->psLinuxMemArea->ui32ByteSize,
			  psRecord->pid,
			  psRecord->ui32Flags,
			  HAPFlagsToString(psRecord->ui32Flags)
			  );

unlock_and_return:
	LinuxUnLockMutex(&gPVRSRVLock);
	return Ret;
}
/*
 * proc read helper: render the debug memory-allocation records one per call.
 * off == 0 prints all water-mark summaries and the column header; off == N
 * (N >= 1) walks the record list to the N-th record and prints it (with the
 * kmem_cache name in the PrivateData column for cache allocations); walking
 * off the end returns END_OF_FILE.  Returning 0 requests a larger buffer.
 */
static off_t printMemoryRecords(char *buffer, size_t count, off_t off)
{
	struct DEBUG_MEM_ALLOC_REC *psRecord;
	off_t Ret;

	/* Records and water marks are protected by the bridge lock */
	LinuxLockMutex(&gPVRSRVLock);

	if (!off)
	{
		if (count < 1000)
		{
			Ret = 0;
			goto unlock_and_return;
		}

		/* Per-allocator current/highest water marks */
		Ret = printAppend(buffer, count, 0, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes allocated via kmalloc",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes allocated via kmalloc",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMALLOC]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes allocated via vmalloc",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes allocated via vmalloc",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_VMALLOC]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes allocated via alloc_pages",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes allocated via alloc_pages",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_ALLOC_PAGES]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes allocated via ioremap",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes allocated via ioremap",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IOREMAP]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes reserved for \"IO\" memory areas",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes allocated for \"IO\" memory areas",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_IO]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes allocated via kmem_cache_alloc",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes allocated via kmem_cache_alloc",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Current Water Mark of bytes mapped via kmap",
				  g_WaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "Highest Water Mark of bytes mapped via kmap",
				  g_HighWaterMarkData[DEBUG_MEM_ALLOC_TYPE_KMAP]);

		Ret = printAppend(buffer, count, Ret, "\n");

		/* Aggregate system-RAM and IO-memory water marks */
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "The Current Water Mark for memory allocated from system RAM",
				  g_SysRAMWaterMark);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "The Highest Water Mark for memory allocated from system RAM",
				  g_SysRAMHighWaterMark);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "The Current Water Mark for memory allocated from IO memory",
				  g_IOMemWaterMark);
		Ret = printAppend(buffer, count, Ret, "%-60s: %d bytes\n",
				  "The Highest Water Mark for memory allocated from IO memory",
				  g_IOMemHighWaterMark);

		Ret = printAppend(buffer, count, Ret, "\n");

		/* Column header for the per-record lines that follow */
		Ret = printAppend(buffer, count, Ret,
				  "Details for all known allocations:\n"
				  "%-16s %-8s %-8s %-10s %-5s %-10s %s\n",
				  "Type",
				  "CpuVAddr",
				  "CpuPAddr",
				  "Bytes",
				  "PID",
				  "PrivateData",
				  "Filename:Line");

		goto unlock_and_return;
	}

	if (count < 1000)
	{
		Ret = 0;
		goto unlock_and_return;
	}

	/* Advance to the off-th record (off is 1-based) */
	for (psRecord = g_MemoryRecords; --off && psRecord;
	     psRecord = psRecord->psNext)
		;
	if (!psRecord)
	{
		Ret = END_OF_FILE;
		goto unlock_and_return;
	}

	if (psRecord->eAllocType != DEBUG_MEM_ALLOC_TYPE_KMEM_CACHE)
		Ret = printAppend(buffer, count, 0,
				  "%-16s %-8p %08lx %-10d %-5d %-10s %s:%d\n",
				  DebugMemAllocRecordTypeToString(psRecord->eAllocType),
				  psRecord->pvCpuVAddr,
				  psRecord->ulCpuPAddr,
				  psRecord->ui32Bytes,
				  psRecord->pid,
				  "NULL",
				  psRecord->pszFileName,
				  psRecord->ui32Line);
	else
		/* kmem_cache allocations show the cache name as private data */
		Ret = printAppend(buffer, count, 0,
				  "%-16s %-8p %08lx %-10d %-5d %-10s %s:%d\n",
				  DebugMemAllocRecordTypeToString(psRecord->eAllocType),
				  psRecord->pvCpuVAddr,
				  psRecord->ulCpuPAddr,
				  psRecord->ui32Bytes,
				  psRecord->pid,
				  KMemCacheNameWrapper(psRecord->pvPrivateData),
				  psRecord->pszFileName,
				  psRecord->ui32Line);

unlock_and_return:
	LinuxUnLockMutex(&gPVRSRVLock);
	return Ret;
}