/*!
*******************************************************************************
 @Function      AcquireHandle
 @Description   Acquire a new handle and associate it with the given resource.
 @Input         psBase   - Pointer to handle base structure
                phHandle - Points to a handle pointer
                pvData   - Pointer to resource to be associated with the handle
 @Output        phHandle - Points to a handle pointer
 @Return        Error code or PVRSRV_OK
******************************************************************************/
static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase,
				  IMG_HANDLE *phHandle,
				  IMG_VOID *pvData)
{
	IMG_UINT32 ui32NewIndex = BASE_TO_TOTAL_INDICES(psBase);
	HANDLE_IMPL_DATA *psNewHandleData = IMG_NULL;
	PVRSRV_ERROR eError;

	PVR_ASSERT(psBase != IMG_NULL);
	PVR_ASSERT(phHandle != IMG_NULL);
	PVR_ASSERT(pvData != IMG_NULL);

	/* Ensure there is a free handle */
	eError = EnsureFreeHandles(psBase, 1);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: EnsureFreeHandles failed (%s)",
			 __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
		return eError;
	}
	PVR_ASSERT(psBase->ui32TotalFreeHandCount != 0);

	if (!psBase->bPurgingEnabled)
	{
		/* Array index of first free handle */
		ui32NewIndex = psBase->ui32FirstFreeIndex;

		/* Get handle array entry */
		psNewHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32NewIndex);
	}
	else
	{
		IMG_UINT32 ui32BlockedIndex;

		/*
		 * If purging is enabled, we always try to allocate handles
		 * at the front of the array, to increase the chances that
		 * the size of the handle array can be reduced by a purge.
		 * No linked list of free handles is kept; we search for
		 * free handles as required.
		 */

		/*
		 * ui32FirstFreeIndex should only be set when a new batch of
		 * handle structures is allocated, and should always be a
		 * multiple of the block size.
		 */
		PVR_ASSERT((psBase->ui32FirstFreeIndex % HANDLE_BLOCK_SIZE) == 0);

		for (ui32BlockedIndex = ROUND_DOWN_TO_MULTIPLE_OF_BLOCK_SIZE(psBase->ui32FirstFreeIndex);
		     ui32BlockedIndex < psBase->ui32TotalHandCount;
		     ui32BlockedIndex += HANDLE_BLOCK_SIZE)
		{
			HANDLE_BLOCK *psHandleBlock = BASE_AND_INDEX_TO_HANDLE_BLOCK(psBase, ui32BlockedIndex);

			if (psHandleBlock->ui32FreeHandCount == 0)
			{
				continue;
			}

			/* Scan within the block for the first free entry */
			for (ui32NewIndex = ui32BlockedIndex;
			     ui32NewIndex < ui32BlockedIndex + HANDLE_BLOCK_SIZE;
			     ui32NewIndex++)
			{
				psNewHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32NewIndex);
				if (psNewHandleData->pvData == IMG_NULL)
				{
					break;
				}
			}

			/*
			 * BUG FIX: stop at the first block containing a free
			 * handle. Without this break, the outer loop kept
			 * scanning subsequent blocks and overwrote
			 * psNewHandleData/ui32NewIndex with a later free
			 * entry, defeating the front-of-array allocation
			 * policy described above.
			 */
			break;
		}

		psBase->ui32FirstFreeIndex = 0;
		PVR_ASSERT(INDEX_IS_VALID(psBase, ui32NewIndex));
	}
	PVR_ASSERT(psNewHandleData != IMG_NULL);

	psBase->ui32TotalFreeHandCount--;

	PVR_ASSERT(INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32NewIndex) <= HANDLE_BLOCK_SIZE);
	PVR_ASSERT(INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32NewIndex) > 0);

	INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32NewIndex)--;

	/* No free list management if purging is enabled */
	if (!psBase->bPurgingEnabled)
	{
		/* Check whether the last free handle has been allocated */
		if (psBase->ui32TotalFreeHandCount == 0)
		{
			PVR_ASSERT(psBase->ui32FirstFreeIndex == ui32NewIndex);
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == (ui32NewIndex + 1));

			psBase->ui32LastFreeIndexPlusOne = 0;
			psBase->ui32FirstFreeIndex = 0;
		}
		else
		{
			/*
			 * Update the first free handle index.
			 * If the "next free index plus one" field in the new
			 * handle structure is zero, the next free index is
			 * the index of the new handle plus one. This
			 * convention has been adopted to simplify the
			 * initialisation of freshly allocated handle space.
			 */
			if (psNewHandleData->ui32NextIndexPlusOne == 0)
			{
				psBase->ui32FirstFreeIndex = ui32NewIndex + 1;
			}
			else
			{
				psBase->ui32FirstFreeIndex = psNewHandleData->ui32NextIndexPlusOne - 1;
			}
		}
	}

	PVR_ASSERT(HANDLE_DATA_TO_HANDLE(psNewHandleData) == INDEX_TO_HANDLE(ui32NewIndex));

	psNewHandleData->pvData = pvData;
	psNewHandleData->ui32NextIndexPlusOne = 0;

	/* Return the new handle to the client */
	*phHandle = INDEX_TO_HANDLE(ui32NewIndex);

#if defined(DEBUG_HANDLEALLOC_KM)
	PVR_DPF((PVR_DBG_MESSAGE, "Handle acquire base %p hdl %p", psBase, *phHandle));
#endif

	return PVRSRV_OK;
}
/*!
*******************************************************************************
 @Function      EnableSGXClocks
 @Description   Enable SGX clocks. Idempotent: returns immediately when the
                sSGXClocksEnabled flag indicates they are already on. When
                PM_RUNTIME_SUPPORT is not configured the SGX functional clock
                is enabled and its rate set directly; otherwise runtime PM
                (and, where configured, the DVFS framework) is used.
 @Input         psSysData - system data
 @Return        PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
#if !defined(PM_RUNTIME_SUPPORT)
	IMG_INT res;
	long lRate, lNewRate;
#endif

	/* SGX clocks already enabled? */
	if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
	{
		return PVRSRV_OK;
	}

	/*
	 * NOTE: the original code printed this message twice (once
	 * unconditionally and once under !PM_RUNTIME_SUPPORT); print it once.
	 */
	PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));

#if !defined(PM_RUNTIME_SUPPORT)
	res = clk_enable(psSysSpecData->psSGX_FCK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't enable SGX functional clock (%d)", res));
		return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
	}

	lNewRate = clk_round_rate(psSysSpecData->psSGX_FCK, SYS_SGX_CLOCK_SPEED + ONE_MHZ);
	if (lNewRate <= 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Couldn't round SGX functional clock rate"));
		/* BUG FIX: don't leak the enable reference taken above */
		clk_disable(psSysSpecData->psSGX_FCK);
		return PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE;
	}

	/* Only change the rate if it differs from the current one */
	lRate = clk_get_rate(psSysSpecData->psSGX_FCK);
	if (lRate != lNewRate)
	{
		res = clk_set_rate(psSysSpecData->psSGX_FCK, lNewRate);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Couldn't set SGX functional clock rate (%d)", res));
			/* BUG FIX: don't leak the enable reference taken above */
			clk_disable(psSysSpecData->psSGX_FCK);
			return PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE;
		}
	}

#if defined(DEBUG)
	{
		IMG_UINT32 rate = clk_get_rate(psSysSpecData->psSGX_FCK);
		PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: SGX Functional Clock is %dMhz", HZ_TO_MHZ(rate)));
	}
#endif
#endif /* !defined(PM_RUNTIME_SUPPORT) */

#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK)
	{
		struct gpu_platform_data *pdata;
		IMG_UINT32 max_freq_index;
		int res;

		pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;
		max_freq_index = psSysSpecData->ui32SGXFreqListSize - 2;

		/*
		 * Request maximum frequency from DVFS layer if not already
		 * set. DVFS may report busy if early in initialization, but
		 * all other errors are considered serious. Upon any error we
		 * proceed assuming our safe frequency value to be in use as
		 * indicated by the "unknown" index.
		 */
		if (psSysSpecData->ui32SGXFreqListIndex != max_freq_index)
		{
			PVR_ASSERT(pdata->device_scale != IMG_NULL);
			res = pdata->device_scale(&gpsPVRLDMDev->dev,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,4,0))
						  &gpsPVRLDMDev->dev,
#endif
						  psSysSpecData->pui32SGXFreqList[max_freq_index]);
			if (res == 0)
			{
				psSysSpecData->ui32SGXFreqListIndex = max_freq_index;
			}
			else if (res == -EBUSY)
			{
				PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Unable to scale SGX frequency (EBUSY)"));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
			else if (res < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Unable to scale SGX frequency (%d)", res));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
		}
	}
#endif /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */
	{
		/*
		 * pm_runtime_get_sync returns 1 after the module has
		 * been reloaded.
		 */
#if defined(PM_RUNTIME_SUPPORT)
		int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
			return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
		}
#endif
	}
#endif /* defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) */

	SysEnableSGXInterrupts(psSysData);

	/* Indicate that the SGX clocks are enabled */
	atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);

#else /* !defined(NO_HARDWARE) */
	PVR_UNREFERENCED_PARAMETER(psSysData);
#endif /* !defined(NO_HARDWARE) */
	return PVRSRV_OK;
}
/*!
*******************************************************************************
 @Function      AcquireGPTimer
 @Description   Acquire a GP timer: optionally claim and enable its
                functional/interface clocks (PVR_OMAP4_TIMING_PRCM), put the
                timer into posted mode and start it with auto-reload on
                overflow.
 @Input         psSysSpecData - system specific data
 @Return        PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
#if defined(PVR_OMAP4_TIMING_PRCM)
	struct clk *psCLK;
	IMG_INT res;
	struct clk *sys_ck;
	IMG_INT rate;
#endif
	PVRSRV_ERROR eError;

	IMG_CPU_PHYADDR sTimerRegPhysBase;
	IMG_HANDLE hTimerEnable;
	IMG_UINT32 *pui32TimerEnable;

#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
	/* The timer must not already have been acquired */
	PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);
#endif

#if defined(PVR_OMAP4_TIMING_PRCM)
	/* assert our dependence on the GPTIMER11 module */
	/* NOTE(review): the clocks claimed below are timer7/gpt7, but the log
	 * messages (and this comment) say GPTIMER11 — looks like copied text;
	 * confirm which timer this platform actually uses. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	psCLK = clk_get(NULL, "timer7_fck");
#else
	psCLK = clk_get(NULL, "gpt7_fck");
#endif
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
		goto ExitError;
	}
	psSysSpecData->psGPT11_FCK = psCLK;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
	/* Pre-3.8 kernels expose a separate interface clock */
	psCLK = clk_get(NULL, "gpt7_ick");
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
		goto ExitError;
	}
	psSysSpecData->psGPT11_ICK = psCLK;
#endif

#if 0
	/* Disabled: reparenting the timer onto the system clock */
	sys_ck = clk_get(NULL, "sys_clkin_ck");
	if (IS_ERR(sys_ck))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
		goto ExitError;
	}
	if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
	{
		PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
		res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
			goto ExitError;
		}
	}
#endif

	rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
	PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));

	res = clk_enable(psSysSpecData->psGPT11_FCK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
		goto ExitError;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0))
	res = clk_enable(psSysSpecData->psGPT11_ICK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
		goto ExitDisableGPT11FCK;
	}
#endif
#endif /* defined(PVR_OMAP4_TIMING_PRCM) */

	/* Put the timer into posted mode (TSICR bit 2); despite the old
	 * comment here, bit 2 set means posted, as the trace below states. */
	sTimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_TSICR_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, &hTimerEnable);
	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	if(!(*pui32TimerEnable & 4))
	{
		PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));

		/* Set posted mode */
		*pui32TimerEnable |= 4;
	}

	OSUnMapPhysToLin(pui32TimerEnable, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, hTimerEnable);

	/* Enable the timer: start (ST) with auto-reload (AR) on overflow */
	sTimerRegPhysBase.uiAddr = SYS_TI335x_GP7TIMER_ENABLE_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, &hTimerEnable);
	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	/* Enable and set autoreload on overflow */
	*pui32TimerEnable = 3;

	OSUnMapPhysToLin(pui32TimerEnable, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, hTimerEnable);

#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
	/* Remember the timer base so ReleaseGPTimer (presumably) can undo this */
	psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;
#endif
	eError = PVRSRV_OK;
	goto Exit;

ExitDisableGPT11ICK:
#if defined(PVR_OMAP4_TIMING_PRCM)
	/* NOTE(review): on kernels >= 3.8 psGPT11_ICK is never obtained or
	 * enabled, yet this path still calls clk_disable on it — verify
	 * psGPT11_ICK is zero-initialised (clk_disable(NULL) is a no-op) or
	 * guard this with the same kernel-version check. */
	clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
	clk_disable(psSysSpecData->psGPT11_FCK);
ExitError:
#endif /* defined(PVR_OMAP4_TIMING_PRCM) */
	eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
	return eError;
}
/* Allocate and initialise a DEVMEM_IMPORT structure.
 *
 * On success *ppsImport owns a freshly allocated import with all three locks
 * created and refcounts zeroed; on failure everything allocated so far is
 * torn down and an error is returned. */
IMG_INTERNAL PVRSRV_ERROR _DevmemImportStructAlloc(IMG_HANDLE hBridge,
						   IMG_BOOL bExportable,
						   DEVMEM_IMPORT **ppsImport)
{
	PVRSRV_ERROR eErr;
	DEVMEM_IMPORT *psNewImport = OSAllocMem(sizeof *psNewImport);

	if (psNewImport == IMG_NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* Poison the device-import fields we don't have values for yet so any
	 * accidental use is detectable, then create the device-import lock. */
	psNewImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
	psNewImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
	psNewImport->sDeviceImport.psHeap = IMG_NULL;
	psNewImport->sDeviceImport.bMapped = IMG_FALSE;
	eErr = OSLockCreate(&psNewImport->sDeviceImport.hLock, LOCK_TYPE_PASSIVE);
	if (eErr != PVRSRV_OK)
	{
		goto ErrorFreeImport;
	}

	/* CPU-import side starts with no mapping */
	psNewImport->sCPUImport.hOSMMapData = IMG_NULL;
	psNewImport->sCPUImport.pvCPUVAddr = IMG_NULL;
	eErr = OSLockCreate(&psNewImport->sCPUImport.hLock, LOCK_TYPE_PASSIVE);
	if (eErr != PVRSRV_OK)
	{
		goto ErrorDestroyDeviceLock;
	}

	/* Common state: bridge handle, exportability flag, zeroed refcounts */
	psNewImport->hBridge = hBridge;
	psNewImport->bExportable = bExportable;
	psNewImport->ui32RefCount = 0;
	psNewImport->sDeviceImport.ui32RefCount = 0;
	psNewImport->sCPUImport.ui32RefCount = 0;

	/* Lock protecting the import structure itself */
	eErr = OSLockCreate(&psNewImport->hLock, LOCK_TYPE_PASSIVE);
	if (eErr != PVRSRV_OK)
	{
		goto ErrorDestroyCPULock;
	}

#if !defined(__KERNEL__) && defined(SUPPORT_ION)
	/* No dma-buf fd associated yet */
	psNewImport->sCPUImport.iDmaBufFd = -1;
#endif

	*ppsImport = psNewImport;
	return PVRSRV_OK;

	/* Unwind in reverse order of construction */
ErrorDestroyCPULock:
	OSLockDestroy(psNewImport->sCPUImport.hLock);
ErrorDestroyDeviceLock:
	OSLockDestroy(psNewImport->sDeviceImport.hLock);
ErrorFreeImport:
	OSFreeMem(psNewImport);

	PVR_ASSERT(eErr != PVRSRV_OK);
	return eErr;
}
/* Map an import into the CPU.
 *
 * Takes the CPU-import lock and increments the CPU mapping refcount; the
 * actual mapping is created only on the 0 -> 1 transition (subsequent calls
 * just add a reference). On mapping failure the refcount increment and the
 * structure reference are rolled back before the lock is released. */
IMG_INTERNAL PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
{
	PVRSRV_ERROR eError;
	DEVMEM_CPU_IMPORT *psCPUImport;
	IMG_SIZE_T uiMappingLength;

	psCPUImport = &psImport->sCPUImport;

	OSLockAcquire(psCPUImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psImport,
			      psCPUImport->ui32RefCount, psCPUImport->ui32RefCount+1);

	/* First mapper creates the mapping; later callers share it */
	if (psCPUImport->ui32RefCount++ == 0)
	{
		/* Hold a reference on the import while it is mapped */
		_DevmemImportStructAcquire(psImport);

#if !defined(__KERNEL__) && defined(SUPPORT_ION)
		if (psImport->sCPUImport.iDmaBufFd >= 0)
		{
			void *pvCPUVAddr;

			/* For ion imports, use the ion fd and mmap facility to map the
			 * buffer to user space. We can bypass the services bridge in
			 * this case and possibly save some time. */
			pvCPUVAddr = mmap(NULL, psImport->uiSize, PROT_READ | PROT_WRITE,
					  MAP_SHARED, psImport->sCPUImport.iDmaBufFd, 0);
			if (pvCPUVAddr == MAP_FAILED)
			{
				eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
				goto failMap;
			}

			psCPUImport->hOSMMapData = pvCPUVAddr;
			psCPUImport->pvCPUVAddr = pvCPUVAddr;
			uiMappingLength = psImport->uiSize;
		}
		else
#endif
		{
			/* Non-ion (or kernel) path: map the PMR via the OS layer */
			eError = OSMMapPMR(psImport->hBridge,
					   psImport->hPMR,
					   psImport->uiSize,
					   &psCPUImport->hOSMMapData,
					   &psCPUImport->pvCPUVAddr,
					   &uiMappingLength);
			if (eError != PVRSRV_OK)
			{
				goto failMap;
			}
		}

		/* There is no reason the mapping length is different to the size */
		PVR_ASSERT(uiMappingLength == psImport->uiSize);
	}
	OSLockRelease(psCPUImport->hLock);

	return PVRSRV_OK;

failMap:
	/* Roll back the refcount taken above and drop the structure reference */
	psCPUImport->ui32RefCount--;
	_DevmemImportStructRelease(psImport);
	OSLockRelease(psCPUImport->hLock);
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
/* Enable SGX clocks (OMAP4/runtime-PM variant).
 *
 * Idempotent: returns immediately if sSGXClocksEnabled indicates the clocks
 * are already on. Where the LDM platform + DVFS framework are configured,
 * requests the maximum SGX frequency, then takes a runtime-PM reference;
 * finally enables SGX interrupts and marks the clocks enabled. */
PVRSRV_ERROR EnableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;

	/* SGX clocks already enabled? */
	if (atomic_read(&psSysSpecData->sSGXClocksEnabled) != 0)
	{
		return PVRSRV_OK;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "EnableSGXClocks: Enabling SGX Clocks"));

#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
#if defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK)
	{
		struct gpu_platform_data *pdata;
		IMG_UINT32 max_freq_index;
		int res;

		pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;
		/* Last list entry is the "unknown"/safe index; the one before it
		 * is the maximum real frequency */
		max_freq_index = psSysSpecData->ui32SGXFreqListSize - 2;

		/* Request maximum frequency from the DVFS layer if not already
		 * set. EBUSY is tolerated (early init); on any error fall back
		 * to the "unknown" index. */
		if (psSysSpecData->ui32SGXFreqListIndex != max_freq_index)
		{
			PVR_ASSERT(pdata->device_scale != IMG_NULL);
			res = pdata->device_scale(&gpsPVRLDMDev->dev,
						  &gpsPVRLDMDev->dev,
						  psSysSpecData->pui32SGXFreqList[max_freq_index]);
			if (res == 0)
			{
				psSysSpecData->ui32SGXFreqListIndex = max_freq_index;
			}
			else if (res == -EBUSY)
			{
				PVR_DPF((PVR_DBG_WARNING, "EnableSGXClocks: Unable to scale SGX frequency (EBUSY)"));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
			else if (res < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: Unable to scale SGX frequency (%d)", res));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
		}
	}
#endif
	{
		/* Take a runtime-PM reference on the device; note
		 * pm_runtime_get_sync can legitimately return 1 (already
		 * active), so only negative values are errors. */
		int res = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSGXClocks: pm_runtime_get_sync failed (%d)", -res));
			return PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK;
		}
	}
#endif

	SysEnableSGXInterrupts(psSysData);

	/* Indicate that the SGX clocks are enabled */
	atomic_set(&psSysSpecData->sSGXClocksEnabled, 1);

#else
	PVR_UNREFERENCED_PARAMETER(psSysData);
#endif
	return PVRSRV_OK;
}
/*!
*******************************************************************************
 @Function      AcquireGPTimer
 @Description   Enable the SGX functional and interface clocks, then acquire
                a general-purpose timer (OMAP3 variant): claim and enable its
                clocks when PVR_OMAP4_TIMING_PRCM is configured, put the timer
                into posted mode, and start it with auto-reload on overflow.
 @Input         psSysSpecData - system specific data
 @Return        PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
	/* BUG FIX: psCLK is used unconditionally for the SGX clocks below, so
	 * it must be declared outside the PVR_OMAP4_TIMING_PRCM guard. */
	struct clk *psCLK;
#if defined(PVR_OMAP4_TIMING_PRCM)
	IMG_INT res;
	struct clk *sys_ck;
	IMG_INT rate;
#endif
	PVRSRV_ERROR eError;

	IMG_CPU_PHYADDR sTimerRegPhysBase;
	IMG_HANDLE hTimerEnable;
	IMG_UINT32 *pui32TimerEnable;

	/* The timer must not already have been acquired */
	PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);

	psCLK = clk_get(NULL, "sgx_fck");
	if (IS_ERR(psCLK))
	{
		/* BUG FIX: message said "Interface" for the functional clock */
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Functional Clock"));
		/* BUG FIX: this function returns PVRSRV_ERROR; the original
		 * bare "return;" here was invalid C. */
		return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
	}
	/* NOTE(review): the clk_enable results for the SGX clocks are
	 * ignored, matching the original behaviour — confirm this is
	 * intentional. */
	clk_enable(psCLK);

	psCLK = clk_get(NULL, "sgx_ick");
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get SGX Interface Clock"));
		/* BUG FIX: was a bare "return;" (see above) */
		return PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
	}
	clk_enable(psCLK);

#if defined(PVR_OMAP4_TIMING_PRCM)
	psCLK = clk_get(NULL, "gpt6_fck");
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER functional clock"));
		goto ExitError;
	}
	psSysSpecData->psGPT11_FCK = psCLK;

	psCLK = clk_get(NULL, "gpt6_ick");
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER interface clock"));
		goto ExitError;
	}
	psSysSpecData->psGPT11_ICK = psCLK;

	sys_ck = clk_get(NULL, "sys_ck");
	if (IS_ERR(sys_ck))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
		goto ExitError;
	}

	/* Reparent the timer onto the system clock if necessary */
	if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
	{
		PVR_TRACE(("Setting GPTIMER parent to System Clock"));
		res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER parent clock (%d)", res));
			goto ExitError;
		}
	}

	rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
	PVR_TRACE(("GPTIMER clock is %dMHz", HZ_TO_MHZ(rate)));

	res = clk_enable(psSysSpecData->psGPT11_FCK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER functional clock (%d)", res));
		goto ExitError;
	}

	res = clk_enable(psSysSpecData->psGPT11_ICK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER interface clock (%d)", res));
		goto ExitDisableGPT11FCK;
	}
#endif

	/* Put the timer into posted mode (TSICR bit 2) */
	sTimerRegPhysBase.uiAddr = SYS_OMAP3_GPTIMER_TSICR_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, &hTimerEnable);
	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	if(!(*pui32TimerEnable & 4))
	{
		PVR_TRACE(("Setting GPTIMER mode to posted (currently is non-posted)"));
		*pui32TimerEnable |= 4;
	}

	OSUnMapPhysToLin(pui32TimerEnable, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, hTimerEnable);

	/* Start the timer (ST) with auto-reload (AR) on overflow */
	sTimerRegPhysBase.uiAddr = SYS_OMAP3_GPTIMER_ENABLE_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, &hTimerEnable);
	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	*pui32TimerEnable = 3;

	OSUnMapPhysToLin(pui32TimerEnable, 4, PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED, hTimerEnable);

	/* Remember the timer base so it can be released later */
	psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;

	eError = PVRSRV_OK;
	goto Exit;

ExitDisableGPT11ICK:
#if defined(PVR_OMAP4_TIMING_PRCM)
	clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
	clk_disable(psSysSpecData->psGPT11_FCK);
ExitError:
#endif
	eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
	return eError;
}
/* Connect a process to services: look up (or create) its per-process data
 * and take a reference on it.
 *
 * On first connect for a PID the per-process structure is allocated,
 * inserted into the PID hash table, and its OS private data, kernel handle,
 * handle base and resman context are initialised. Any failure after the
 * hash insert unwinds through FreePerProcessData. On success the structure's
 * refcount is incremented. */
PVRSRV_ERROR PVRSRVPerProcessDataConnect(IMG_UINT32 ui32PID, IMG_UINT32 ui32Flags)
{
	PVRSRV_PER_PROCESS_DATA *psPerProc;
	IMG_HANDLE hBlockAlloc;
	PVRSRV_ERROR eError = PVRSRV_OK;

	PVR_ASSERT(psHashTab != IMG_NULL);

	/* Look for existing per-process data for this PID */
	psPerProc = (PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (IMG_UINTPTR_T)ui32PID);

	if (psPerProc == IMG_NULL)
	{
		/* First connect for this process: allocate and zero the data */
		eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
				    sizeof(*psPerProc),
				    (IMG_PVOID *)&psPerProc,
				    &hBlockAlloc,
				    "Per Process Data");
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate per-process data (%d)", eError));
			return eError;
		}
		/* Zeroing matters: FreePerProcessData relies on ui32PID == 0
		 * to detect a failed hash insert */
		OSMemSet(psPerProc, 0, sizeof(*psPerProc));
		psPerProc->hBlockAlloc = hBlockAlloc;

		/*
		 * FIXME: using a hash to retrieve psPerProc makes not much
		 * sense. We always want to have this struct on the IOCTL path
		 * for the current task, so it'd be just a matter of storing
		 * it in the file private object. Until this is resolved and
		 * we get rid of this pid specific lookup make sure the above
		 * assumption holds.
		 */
		WARN_ON(OSGetCurrentProcessIDKM() != ui32PID);
		get_task_comm(psPerProc->name, current);

		if (!HASH_Insert(psHashTab, (IMG_UINTPTR_T)ui32PID, (IMG_UINTPTR_T)psPerProc))
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't insert per-process data into hash table"));
			eError = PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED;
			goto failure;
		}

		/* PID is set only after a successful insert (see comment on
		 * the OSMemSet above) */
		psPerProc->ui32PID = ui32PID;
		psPerProc->ui32RefCount = 0;

#if defined(PERPROC_LIST)
		List_PVRSRV_PER_PROCESS_DATA_Insert(&psPerProcList, psPerProc);
		/*PVR_LOG(("MarkTupsperproc %d\n", ui32PID));*/
#endif

#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
		if (ui32Flags == SRV_FLAGS_PDUMP_ACTIVE)
		{
			psPerProc->bPDumpActive = IMG_TRUE;
		}
#else
		PVR_UNREFERENCED_PARAMETER(ui32Flags);
#endif

		/* OS-specific per-process state */
		eError = OSPerProcessPrivateDataInit(&psPerProc->hOsPrivateData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: OSPerProcessPrivateDataInit failed (%d)", eError));
			goto failure;
		}

		/* Handle for the per-process data in the kernel handle base */
		eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE,
					   &psPerProc->hPerProcData,
					   psPerProc,
					   PVRSRV_HANDLE_TYPE_PERPROC_DATA,
					   PVRSRV_HANDLE_ALLOC_FLAG_NONE);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle for per-process data (%d)", eError));
			goto failure;
		}

		/* The process's own handle base */
		eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't allocate handle base for process (%d)", eError));
			goto failure;
		}

		eError = OSPerProcessSetHandleOptions(psPerProc->psHandleBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't set handle options (%d)", eError));
			goto failure;
		}

		/* Resource manager context for this process */
		eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: Couldn't register with the resource manager"));
			goto failure;
		}
	}

	psPerProc->ui32RefCount++;
	PVR_DPF((PVR_DBG_MESSAGE,
		 "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d",
		 ui32PID, psPerProc->ui32RefCount));

	return eError;

failure:
	/* FreePerProcessData tolerates the partially-initialised structure */
	(IMG_VOID)FreePerProcessData(psPerProc);
	return eError;
}
/* Free a per-process data structure.
 *
 * Removes it from the PID hash (tolerating the case where it was never
 * inserted — a failed connect leaves ui32PID == 0), then releases its
 * handle base, kernel handle, OS private data and finally the memory.
 * Each teardown step's failure is returned immediately, leaving the
 * structure partially freed. */
static PVRSRV_ERROR FreePerProcessData(PVRSRV_PER_PROCESS_DATA *psPerProc)
{
	PVRSRV_ERROR eError;
	IMG_UINTPTR_T uiPerProc;

	PVR_ASSERT(psPerProc != IMG_NULL);

	/* Release-build guard (the assert above compiles out) */
	if (psPerProc == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: invalid parameter"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	uiPerProc = HASH_Remove(psHashTab, (IMG_UINTPTR_T)psPerProc->ui32PID);
	if (uiPerProc == 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't find process in per-process data hash table"));
		/* Only acceptable if the insert never happened: the connect
		 * path sets ui32PID only after a successful HASH_Insert */
		PVR_ASSERT(psPerProc->ui32PID == 0);
	}
	else
	{
		/* Sanity: the hash entry must be the structure being freed */
		PVR_ASSERT((PVRSRV_PER_PROCESS_DATA *)uiPerProc == psPerProc);
		PVR_ASSERT(((PVRSRV_PER_PROCESS_DATA *)uiPerProc)->ui32PID == psPerProc->ui32PID);
	}

#if defined(PERPROC_LIST)
	List_PVRSRV_PER_PROCESS_DATA_Remove(psPerProc);
	/*PVR_LOG(("MarkTu free perproce %d\n", psPerProc->ui32PID));*/
#endif

	/* Free the process's handle base, if it got as far as creating one */
	if (psPerProc->psHandleBase != IMG_NULL)
	{
		eError = PVRSRVFreeHandleBase(psPerProc->psHandleBase);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free handle base for process (%d)", eError));
			return eError;
		}
	}

	/* Release the kernel handle referring to this structure */
	if (psPerProc->hPerProcData != IMG_NULL)
	{
		eError = PVRSRVReleaseHandle(KERNEL_HANDLE_BASE, psPerProc->hPerProcData, PVRSRV_HANDLE_TYPE_PERPROC_DATA);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't release per-process data handle (%d)", eError));
			return eError;
		}
	}

	eError = OSPerProcessPrivateDataDeInit(psPerProc->hOsPrivateData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: OSPerProcessPrivateDataDeInit failed (%d)", eError));
		return eError;
	}

	eError = OSFreeMem(PVRSRV_OS_NON_PAGEABLE_HEAP,
			   sizeof(*psPerProc),
			   psPerProc,
			   psPerProc->hBlockAlloc);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "FreePerProcessData: Couldn't free per-process data (%d)", eError));
		return eError;
	}

	return PVRSRV_OK;
}
/* Bridge entry point for PMRSecureImportPMR (generated-bridge style).
 *
 * Imports a PMR from a secure export token, wraps the result in a resman
 * item on the caller's connection and returns a handle to it. On any error
 * after the import succeeds, the resman item (or, if registration failed,
 * the PMR reference itself) is released so nothing leaks. Always returns 0;
 * the PVRSRV error is reported through psPMRSecureImportPMROUT->eError. */
static IMG_INT
PVRSRVBridgePMRSecureImportPMR(IMG_UINT32 ui32BridgeID,
			       PVRSRV_BRIDGE_IN_PMRSECUREIMPORTPMR *psPMRSecureImportPMRIN,
			       PVRSRV_BRIDGE_OUT_PMRSECUREIMPORTPMR *psPMRSecureImportPMROUT,
			       CONNECTION_DATA *psConnection)
{
	PMR * psPMRInt = IMG_NULL;
	IMG_HANDLE hPMRInt2 = IMG_NULL;

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SMM_PMRSECUREIMPORTPMR);

	/* Perform the import; also returns the PMR's size and alignment */
	psPMRSecureImportPMROUT->eError =
		PMRSecureImportPMR(
					psPMRSecureImportPMRIN->Export,
					&psPMRInt,
					&psPMRSecureImportPMROUT->uiSize,
					&psPMRSecureImportPMROUT->sAlign);
	/* Exit early if bridged call fails */
	if(psPMRSecureImportPMROUT->eError != PVRSRV_OK)
	{
		goto PMRSecureImportPMR_exit;
	}

	/* Create a resman item and overwrite the handle with it */
	hPMRInt2 = ResManRegisterRes(psConnection->hResManContext,
						RESMAN_TYPE_PMR,
						psPMRInt,
						/* FIXME: how can we avoid this cast? */
						(RESMAN_FREE_FN)&PMRUnrefPMR);
	if (hPMRInt2 == IMG_NULL)
	{
		psPMRSecureImportPMROUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE;
		goto PMRSecureImportPMR_exit;
	}

	/* Hand the resman item back to the client as a handle */
	psPMRSecureImportPMROUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase,
							&psPMRSecureImportPMROUT->hPMR,
							(IMG_HANDLE) hPMRInt2,
							PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
							PVRSRV_HANDLE_ALLOC_FLAG_NONE
							);
	if (psPMRSecureImportPMROUT->eError != PVRSRV_OK)
	{
		goto PMRSecureImportPMR_exit;
	}

PMRSecureImportPMR_exit:
	if (psPMRSecureImportPMROUT->eError != PVRSRV_OK)
	{
		/* If we have a valid resman item we should undo the bridge function by freeing the resman item */
		if (hPMRInt2)
		{
			PVRSRV_ERROR eError = ResManFreeResByPtr(hPMRInt2);

			/* Freeing a resource should never fail... */
			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
		}
		else if (psPMRInt)
		{
			/* Import succeeded but resman registration didn't:
			 * drop the reference directly */
			PMRUnrefPMR(psPMRInt);
		}
	}

	return 0;
}
/* Bridge entry point for PMRSecureExportPMR (generated-bridge style).
 *
 * Looks up the caller's PMR handle, creates a secure export token for it,
 * and registers the export-side PMR as a resman item on the secure
 * connection returned by PMRSecureExportPMR (so it is cleaned up with that
 * connection). On failure after the export succeeds, the resman item (or,
 * if registration failed, the export reference) is released. Always returns
 * 0; the PVRSRV error is carried in psPMRSecureExportPMROUT->eError. */
static IMG_INT
PVRSRVBridgePMRSecureExportPMR(IMG_UINT32 ui32BridgeID,
			       PVRSRV_BRIDGE_IN_PMRSECUREEXPORTPMR *psPMRSecureExportPMRIN,
			       PVRSRV_BRIDGE_OUT_PMRSECUREEXPORTPMR *psPMRSecureExportPMROUT,
			       CONNECTION_DATA *psConnection)
{
	PMR * psPMRInt = IMG_NULL;
	IMG_HANDLE hPMRInt2 = IMG_NULL;
	PMR * psPMROutInt = IMG_NULL;
	IMG_HANDLE hPMROutInt2 = IMG_NULL;
	CONNECTION_DATA *psSecureConnection;

	PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_SMM_PMRSECUREEXPORTPMR);

	{
		/* Look up the address from the handle */
		psPMRSecureExportPMROUT->eError =
			PVRSRVLookupHandle(psConnection->psHandleBase,
					   (IMG_HANDLE *) &hPMRInt2,
					   psPMRSecureExportPMRIN->hPMR,
					   PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
		if(psPMRSecureExportPMROUT->eError != PVRSRV_OK)
		{
			goto PMRSecureExportPMR_exit;
		}

		/* Look up the data from the resman address */
		psPMRSecureExportPMROUT->eError = ResManFindPrivateDataByPtr(hPMRInt2, (IMG_VOID **) &psPMRInt);
		if(psPMRSecureExportPMROUT->eError != PVRSRV_OK)
		{
			goto PMRSecureExportPMR_exit;
		}
	}

	/* Create the secure export; also yields the connection that owns it */
	psPMRSecureExportPMROUT->eError =
		PMRSecureExportPMR(psConnection,
					psPMRInt,
					&psPMRSecureExportPMROUT->Export,
					&psPMROutInt, &psSecureConnection);
	/* Exit early if bridged call fails */
	if(psPMRSecureExportPMROUT->eError != PVRSRV_OK)
	{
		goto PMRSecureExportPMR_exit;
	}

	/* Create a resman item and overwrite the handle with it.
	 * NOTE(review): registered on psSecureConnection's resman context
	 * (not the caller's), so the export lives with that connection —
	 * presumably intentional; confirm against PMRSecureExportPMR. */
	hPMROutInt2 = ResManRegisterRes(psSecureConnection->hResManContext,
						RESMAN_TYPE_PMR,
						psPMROutInt,
						/* FIXME: how can we avoid this cast? */
						(RESMAN_FREE_FN)&PMRSecureUnexportPMR);
	if (hPMROutInt2 == IMG_NULL)
	{
		psPMRSecureExportPMROUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE;
		goto PMRSecureExportPMR_exit;
	}

PMRSecureExportPMR_exit:
	if (psPMRSecureExportPMROUT->eError != PVRSRV_OK)
	{
		/* If we have a valid resman item we should undo the bridge function by freeing the resman item */
		if (hPMROutInt2)
		{
			PVRSRV_ERROR eError = ResManFreeResByPtr(hPMROutInt2);

			/* Freeing a resource should never fail... */
			PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
		}
		else if (psPMROutInt)
		{
			/* Export succeeded but registration didn't: unexport */
			PMRSecureUnexportPMR(psPMROutInt);
		}
	}

	return 0;
}
static IMG_INT PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32BridgeID, PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN, PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT, CONNECTION_DATA *psConnection) { IMG_HANDLE hDevNodeInt = IMG_NULL; IMG_BYTE *psFrameworkCmdInt = IMG_NULL; IMG_HANDLE hPrivDataInt = IMG_NULL; RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = IMG_NULL; IMG_HANDLE hComputeContextInt2 = IMG_NULL; PVRSRV_BRIDGE_ASSERT_CMD(ui32BridgeID, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT); if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0) { psFrameworkCmdInt = OSAllocMem(psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)); if (!psFrameworkCmdInt) { psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto RGXCreateComputeContext_exit; } } /* Copy the data over */ if ( !OSAccessOK(PVR_VERIFY_READ, (IMG_VOID*) psRGXCreateComputeContextIN->psFrameworkCmd, psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) || (OSCopyFromUser(NULL, psFrameworkCmdInt, psRGXCreateComputeContextIN->psFrameworkCmd, psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK) ) { psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; goto RGXCreateComputeContext_exit; } { /* Look up the address from the handle */ psRGXCreateComputeContextOUT->eError = PVRSRVLookupHandle(psConnection->psHandleBase, (IMG_HANDLE *) &hDevNodeInt, psRGXCreateComputeContextIN->hDevNode, PVRSRV_HANDLE_TYPE_DEV_NODE); if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK) { goto RGXCreateComputeContext_exit; } } { /* Look up the address from the handle */ psRGXCreateComputeContextOUT->eError = PVRSRVLookupHandle(psConnection->psHandleBase, (IMG_HANDLE *) &hPrivDataInt, psRGXCreateComputeContextIN->hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK) { goto RGXCreateComputeContext_exit; } } 
psRGXCreateComputeContextOUT->eError = PVRSRVRGXCreateComputeContextKM(psConnection, hDevNodeInt, psRGXCreateComputeContextIN->ui32Priority, psRGXCreateComputeContextIN->sMCUFenceAddr, psRGXCreateComputeContextIN->ui32FrameworkCmdize, psFrameworkCmdInt, hPrivDataInt, &psComputeContextInt); /* Exit early if bridged call fails */ if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK) { goto RGXCreateComputeContext_exit; } /* Create a resman item and overwrite the handle with it */ hComputeContextInt2 = ResManRegisterRes(psConnection->hResManContext, RESMAN_TYPE_RGX_SERVER_COMPUTE_CONTEXT, psComputeContextInt, (RESMAN_FREE_FN)&PVRSRVRGXDestroyComputeContextKM); if (hComputeContextInt2 == IMG_NULL) { psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE; goto RGXCreateComputeContext_exit; } psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandle(psConnection->psHandleBase, &psRGXCreateComputeContextOUT->hComputeContext, (IMG_HANDLE) hComputeContextInt2, PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, PVRSRV_HANDLE_ALLOC_FLAG_NONE ); if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) { goto RGXCreateComputeContext_exit; } RGXCreateComputeContext_exit: if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) { /* If we have a valid resman item we should undo the bridge function by freeing the resman item */ if (hComputeContextInt2) { PVRSRV_ERROR eError = ResManFreeResByPtr(hComputeContextInt2); /* Freeing a resource should never fail... */ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); } else if (psComputeContextInt) { PVRSRVRGXDestroyComputeContextKM(psComputeContextInt); } } if (psFrameworkCmdInt) OSFreeMem(psFrameworkCmdInt); return 0; }
/*! ****************************************************************************** @Function GetMaxHandle @Description Get maximum handle number for given handle base @Input psBase - Pointer to handle base structure @Return Maximum handle number or 0 if handle limits not supported. ******************************************************************************/ static IMG_UINT32 GetMaxHandle(HANDLE_IMPL_BASE *psBase) { PVR_ASSERT(psBase); return psBase->ui32MaxHandleValue; }
/*!
******************************************************************************
 @Function	ReleaseHandle
 @Description	Release a handle that is no longer needed.
 @Input		psBase - Pointer to handle base structure
		hHandle - Handle to release
		ppvData - Points to a void data pointer
 @Output	ppvData - Points to a void data pointer (receives the resource
			  that was associated with the handle; may be NULL if
			  the caller does not need it)
 @Return	PVRSRV_OK or PVRSRV_ERROR
******************************************************************************/
static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase,
				  IMG_HANDLE hHandle,
				  IMG_VOID **ppvData)
{
	IMG_UINT32 ui32Index = HANDLE_TO_INDEX(hHandle);
	HANDLE_IMPL_DATA *psHandleData;
	IMG_VOID *pvData;

	PVR_ASSERT(psBase);

	/* Check handle index is in range */
	if (!INDEX_IS_VALID(psBase, ui32Index))
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: Handle index out of range (%u >= %u)",
			 __FUNCTION__, ui32Index, psBase->ui32TotalHandCount));
		return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
	}

	psHandleData = INDEX_TO_HANDLE_DATA(psBase, ui32Index);

	/* Detach the resource pointer before the entry goes back on the
	 * free list; it is handed back to the caller below. */
	pvData = psHandleData->pvData;
	psHandleData->pvData = IMG_NULL;

	/* No free list management if purging is enabled */
	if (!psBase->bPurgingEnabled)
	{
		if (psBase->ui32TotalFreeHandCount == 0)
		{
			/* List was empty: this entry becomes both head and
			 * (below) tail of the free list. */
			PVR_ASSERT(psBase->ui32FirstFreeIndex == 0);
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);

			psBase->ui32FirstFreeIndex = ui32Index;
		}
		else
		{
			/*
			 * Put the handle pointer on the end of the the free
			 * handle pointer linked list.
			 */
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
			PVR_ASSERT(INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);
			INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32Index + 1;
		}

		/* The released entry must itself be the new list tail. */
		PVR_ASSERT(psHandleData->ui32NextIndexPlusOne == 0);

		/* Update the end of the free handle linked list */
		psBase->ui32LastFreeIndexPlusOne = ui32Index + 1;
	}

	psBase->ui32TotalFreeHandCount++;
	INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32Index)++;

	PVR_ASSERT(INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32Index)<= HANDLE_BLOCK_SIZE);

#if defined(DEBUG)
	{
		/* Cross-check the per-block free counts against the running
		 * total kept in the base structure. */
		IMG_UINT32 ui32BlockedIndex;
		IMG_UINT32 ui32TotalFreeHandCount = 0;

		for (ui32BlockedIndex = 0; ui32BlockedIndex < psBase->ui32TotalHandCount; ui32BlockedIndex += HANDLE_BLOCK_SIZE)
		{
			ui32TotalFreeHandCount += INDEX_TO_BLOCK_FREE_HAND_COUNT(psBase, ui32BlockedIndex);
		}

		PVR_ASSERT(ui32TotalFreeHandCount == psBase->ui32TotalFreeHandCount);
	}
#endif /* defined(DEBUG) */

	if (ppvData)
	{
		*ppvData = pvData;
	}

#if defined(DEBUG_HANDLEALLOC_KM)
	PVR_DPF((PVR_DBG_MESSAGE, "Handle release base %p hdl %p", psBase, hHandle));
#endif

	return PVRSRV_OK;
}
static void MMapPMROpen(struct vm_area_struct* ps_vma) { /* Our VM flags should ensure this function never gets called */ PVR_ASSERT(0); }
IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr, IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Timeoutus, IMG_UINT32 ui32PollPeriodus, IMG_BOOL bAllowPreemption) { #if defined (EMULATOR) { PVR_UNREFERENCED_PARAMETER(bAllowPreemption); #if !defined(__linux__) PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); #endif do { if((*pui32LinMemAddr & ui32Mask) == ui32Value) { return PVRSRV_OK; } #if defined(__linux__) OSWaitus(ui32PollPeriodus); #else OSReleaseThreadQuanta(); #endif } while (ui32Timeoutus); } #else { IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; if (bAllowPreemption) { PVR_ASSERT(ui32PollPeriodus >= 1000); } LOOP_UNTIL_TIMEOUT(ui32Timeoutus) { ui32ActualValue = (*pui32LinMemAddr & ui32Mask); if(ui32ActualValue == ui32Value) { return PVRSRV_OK; } if (bAllowPreemption) { OSSleepms(ui32PollPeriodus / 1000); } else { OSWaitus(ui32PollPeriodus); } } END_LOOP_UNTIL_TIMEOUT(); PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", ui32Value, ui32ActualValue, ui32Mask)); } #endif return PVRSRV_ERROR_TIMEOUT; }
/*!
******************************************************************************
 @Function	SysDeinitialise
 @Description	De-initialises kernel services at 'driver unload' time
 @Return	PVRSRV_ERROR
******************************************************************************/
PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
{
	PVRSRV_ERROR eError;

	/* NOTE(review): this variant operates on the global gpsSysData and
	 * deliberately ignores the psSysData argument. */
	PVR_UNREFERENCED_PARAMETER(psSysData);

	if(gpsSysData->pvSOCTimerRegisterKM)
	{
		OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
				4,
				PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
				gpsSysData->hSOCTimerRegisterOSMemHandle);
	}

#if defined(SYS_USING_INTERRUPTS)
	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
	{
		eError = OSUninstallDeviceLISR(gpsSysData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
			return eError;
		}
	}
#endif

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
	{
		eError = OSUninstallMISR(gpsSysData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
			return eError;
		}
	}

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
	{
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
		PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));

		/* Re-enable SGX clocks whilst SGX is being de-initialised */
		eError = EnableSGXClocks(gpsSysData, IMG_TRUE);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
			return eError;
		}
#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */

		/* De-initialise SGX */
		eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
			return eError;
		}
	}

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_DVFS_INIT))
	{
		eError = SysDvfsDeinitialize(gpsSysSpecificData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to de-init DVFS"));
			gpsSysData = IMG_NULL;
			return eError;
		}
	}

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
	{
		eError = SysPMRuntimeUnregister();
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
			gpsSysData = IMG_NULL;
			return eError;
		}
	}

	/* Disable system clocks - must happen after last access to hardware */
	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
	{
		DisableSystemClocks(gpsSysData);
	}

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))
	{
		eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
			return eError;
		}
	}

	SysDeinitialiseCommon(gpsSysData);

#if defined(NO_HARDWARE)
	if(gsSGXRegsCPUVAddr != IMG_NULL)
	{
		/* Free hardware resources. */
		OSBaseFreeContigMemory(SYS_OWL_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
		gsSGXRegsCPUVAddr = IMG_NULL;
		gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
	}
#endif /* defined(NO_HARDWARE) */

	gpsSysSpecificData->ui32SysSpecificData = 0;
	gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;

	gpsSysData = IMG_NULL;

	/* owl deinitialise. */
	//owl_remove_sysfs(&gpsPVRLDMDev->dev);

	return PVRSRV_OK;
}
enum PVRSRV_ERROR PVRSRVPerProcessDataConnect(u32 ui32PID) { struct PVRSRV_PER_PROCESS_DATA *psPerProc; void *hBlockAlloc; enum PVRSRV_ERROR eError = PVRSRV_OK; PVR_ASSERT(psHashTab != NULL); psPerProc = (struct PVRSRV_PER_PROCESS_DATA *)HASH_Retrieve(psHashTab, (u32)ui32PID); if (psPerProc == NULL) { eError = OSAllocMem(PVRSRV_OS_NON_PAGEABLE_HEAP, sizeof(*psPerProc), (void **)&psPerProc, &hBlockAlloc); if (eError != PVRSRV_OK) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: " "Couldn't allocate per-process data (%d)", eError); return eError; } OSMemSet(psPerProc, 0, sizeof(*psPerProc)); psPerProc->hBlockAlloc = hBlockAlloc; if (!HASH_Insert(psHashTab, (u32) ui32PID, (u32)psPerProc)) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: " "Couldn't insert per-process data into hash table"); eError = PVRSRV_ERROR_GENERIC; goto failure; } psPerProc->ui32PID = ui32PID; psPerProc->ui32RefCount = 0; eError = PVRSRVAllocHandle(KERNEL_HANDLE_BASE, &psPerProc->hPerProcData, psPerProc, PVRSRV_HANDLE_TYPE_PERPROC_DATA, PVRSRV_HANDLE_ALLOC_FLAG_NONE); if (eError != PVRSRV_OK) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: " "Couldn't allocate handle for per-process data (%d)", eError); goto failure; } eError = PVRSRVAllocHandleBase(&psPerProc->psHandleBase, ui32PID); if (eError != PVRSRV_OK) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: " "Couldn't allocate handle base for process (%d)", eError); goto failure; } eError = PVRSRVResManConnect(psPerProc, &psPerProc->hResManContext); if (eError != PVRSRV_OK) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVPerProcessDataConnect: " "Couldn't register with the resource manager"); goto failure; } } psPerProc->ui32RefCount++; PVR_DPF(PVR_DBG_MESSAGE, "PVRSRVPerProcessDataConnect: Process 0x%x has ref-count %d", ui32PID, psPerProc->ui32RefCount); return eError; failure: (void)FreePerProcessData(psPerProc); return eError; }
IMG_VOID DisableSGXClocks(SYS_DATA *psSysData) { #if !defined(NO_HARDWARE) SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData; if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0) { return; } PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks")); SysDisableSGXInterrupts(psSysData); #if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) { int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev); if (res < 0) { PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res)); } } #if defined(SYS_OMAP4_HAS_DVFS_FRAMEWORK) { struct gpu_platform_data *pdata; int res; pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data; if (psSysSpecData->ui32SGXFreqListIndex != 0) { PVR_ASSERT(pdata->device_scale != IMG_NULL); res = pdata->device_scale(&gpsPVRLDMDev->dev, &gpsPVRLDMDev->dev, psSysSpecData->pui32SGXFreqList[0]); if (res == 0) { psSysSpecData->ui32SGXFreqListIndex = 0; } else if (res == -EBUSY) { PVR_DPF((PVR_DBG_WARNING, "DisableSGXClocks: Unable to scale SGX frequency (EBUSY)")); psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1; } else if (res < 0) { PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: Unable to scale SGX frequency (%d)", res)); psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1; } } } #endif #endif atomic_set(&psSysSpecData->sSGXClocksEnabled, 0); #else PVR_UNREFERENCED_PARAMETER(psSysData); #endif }
IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr, IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, IMG_UINT32 ui32Timeoutus, IMG_UINT32 ui32PollPeriodus, IMG_BOOL bAllowPreemption) { #if defined (EMULATOR) { PVR_UNREFERENCED_PARAMETER(bAllowPreemption); #if !defined(__linux__) PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); #endif /* For the Emulator we want the system to stop when a lock-up is detected so the state can be analysed. * Also the Emulator is much slower than real silicon so timeouts are not valid. */ do { if((*pui32LinMemAddr & ui32Mask) == ui32Value) { return PVRSRV_OK; } #if defined(__linux__) OSWaitus(ui32PollPeriodus); #else OSReleaseThreadQuanta(); #endif } while (ui32Timeoutus); /* Endless loop only for the Emulator */ } #else { IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ if (bAllowPreemption) { PVR_ASSERT(ui32PollPeriodus >= 1000); } /* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */ LOOP_UNTIL_TIMEOUT(ui32Timeoutus) { ui32ActualValue = (*pui32LinMemAddr & ui32Mask); if(ui32ActualValue == ui32Value) { return PVRSRV_OK; } if (bAllowPreemption) { OSSleepms(ui32PollPeriodus / 1000); } else { OSWaitus(ui32PollPeriodus); } } END_LOOP_UNTIL_TIMEOUT(); PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", ui32Value, ui32ActualValue, ui32Mask)); } #endif /* #if defined (EMULATOR) */ return PVRSRV_ERROR_TIMEOUT; }
/* SCPCreate */ IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV SCPCreate(IMG_UINT32 ui32CCBSizeLog2, SCP_CONTEXT **ppsContext) { SCP_CONTEXT *psContext; IMG_UINT32 ui32Power2QueueSize = 1 << ui32CCBSizeLog2; PVRSRV_ERROR eError; /* allocate an internal queue info structure */ psContext = OSAllocMem(sizeof(SCP_CONTEXT)); if (psContext == IMG_NULL) { PVR_DPF((PVR_DBG_ERROR,"SCPCreate: Failed to alloc queue struct")); eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto ErrorExit; } OSMemSet(psContext, 0, sizeof(SCP_CONTEXT)); /* allocate the command queue buffer - allow for overrun */ psContext->pvCCB = OSAllocMem(ui32Power2QueueSize); if (psContext->pvCCB == IMG_NULL) { PVR_DPF((PVR_DBG_ERROR,"SCPCreate: Failed to alloc queue buffer")); eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto ErrorExit; } /* Sanity check: Should be zeroed by OSMemSet */ PVR_ASSERT(psContext->ui32ReadOffset == 0); PVR_ASSERT(psContext->ui32WriteOffset == 0); psContext->ui32CCBSize = ui32Power2QueueSize; eError = OSLockCreate(&psContext->hLock, LOCK_TYPE_NONE); if (eError != PVRSRV_OK) { goto ErrorExit; } eError = PVRSRVServerSyncRequesterRegisterKM(&psContext->psSyncRequesterID); if (eError != PVRSRV_OK) { goto ErrorExit; } SCP_DEBUG_PRINT("%s: New SCP %p of size %d", __FUNCTION__, psContext, ui32Power2QueueSize); *ppsContext = psContext; return PVRSRV_OK; ErrorExit: if(psContext) { if(psContext->pvCCB) { OSFreeMem(psContext->pvCCB); psContext->pvCCB = IMG_NULL; } OSFreeMem(psContext); } return eError; }
/* ioctl dispatch entry point for the services bridge.
 *
 * NOTE(review): the #endif below closes a SUPPORT_DRI_DRM-conditional
 * alternate signature that lies outside this excerpt — confirm against
 * the full file.
 *
 * Copies (or, for DRI/DRM, takes directly) the bridge package, resolves
 * the caller's per-process data, applies the PVR_SECURE_FD_EXPORT
 * per-fd policy checks, dispatches via BridgedDispatchKM() and performs
 * post-dispatch fixups, all under the global services mutex. */
IMG_INT32 PVRSRV_BridgeDispatchKM(struct file *pFile, IMG_UINT unref__ ioctlCmd, IMG_UINT32 arg)
#endif
{
	IMG_UINT32 cmd;
#if !defined(SUPPORT_DRI_DRM)
	PVRSRV_BRIDGE_PACKAGE *psBridgePackageUM = (PVRSRV_BRIDGE_PACKAGE *)arg;
	PVRSRV_BRIDGE_PACKAGE sBridgePackageKM;
#endif
	PVRSRV_BRIDGE_PACKAGE *psBridgePackageKM;
	IMG_UINT32 ui32PID = OSGetCurrentProcessIDKM();
	PVRSRV_PER_PROCESS_DATA *psPerProc;
	IMG_INT err = -EFAULT;

	/* All bridge calls are serialised by the global services lock. */
	LinuxLockMutex(&gPVRSRVLock);

#if defined(SUPPORT_DRI_DRM)
	PVR_UNREFERENCED_PARAMETER(dev);

	psBridgePackageKM = (PVRSRV_BRIDGE_PACKAGE *)arg;
	PVR_ASSERT(psBridgePackageKM != IMG_NULL);
#else
	PVR_UNREFERENCED_PARAMETER(ioctlCmd);

	psBridgePackageKM = &sBridgePackageKM;

	/* Validate and copy the bridge package from user space. */
	if(!OSAccessOK(PVR_VERIFY_WRITE,
		       psBridgePackageUM,
		       sizeof(PVRSRV_BRIDGE_PACKAGE)))
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: Received invalid pointer to function arguments",
			 __FUNCTION__));
		goto unlock_and_return;
	}

	if(OSCopyFromUser(IMG_NULL,
			  psBridgePackageKM,
			  psBridgePackageUM,
			  sizeof(PVRSRV_BRIDGE_PACKAGE)) != PVRSRV_OK)
	{
		goto unlock_and_return;
	}
#endif

	cmd = psBridgePackageKM->ui32BridgeID;

#if defined(MODULE_TEST)
	/* Self-test commands: run the test and write its status into the
	 * out buffer when it is the expected size. */
	switch (cmd)
	{
		case PVRSRV_BRIDGE_SERVICES_TEST_MEM1:
			{
				PVRSRV_ERROR eError = MemTest1();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_MEM2:
			{
				PVRSRV_ERROR eError = MemTest2();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_RESOURCE:
			{
				PVRSRV_ERROR eError = ResourceTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_EVENTOBJECT:
			{
				PVRSRV_ERROR eError = EventObjectTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_MEMMAPPING:
			{
				PVRSRV_ERROR eError = MemMappingTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_PROCESSID:
			{
				PVRSRV_ERROR eError = ProcessIDTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_CLOCKUSWAITUS:
			{
				PVRSRV_ERROR eError = ClockusWaitusTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_TIMER:
			{
				PVRSRV_ERROR eError = TimerTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_PRIVSRV:
			{
				PVRSRV_ERROR eError = PrivSrvTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;

		case PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA:
		{
			/* Locals deliberately shadow the outer ui32PID/psPerProc. */
			IMG_UINT32 ui32PID;
			PVRSRV_PER_PROCESS_DATA *psPerProc;
			PVRSRV_ERROR eError;

			ui32PID = OSGetCurrentProcessIDKM();

			PVRSRVTrace("PVRSRV_BRIDGE_SERVICES_TEST_COPYDATA %d", ui32PID);

			psPerProc = PVRSRVPerProcessData(ui32PID);

			eError = CopyDataTest(psBridgePackageKM->pvParamIn,
					      psBridgePackageKM->pvParamOut,
					      psPerProc);

			*(PVRSRV_ERROR*)psBridgePackageKM->pvParamOut = eError;
			err = 0;
			goto unlock_and_return;
		}

		case PVRSRV_BRIDGE_SERVICES_TEST_POWERMGMT:
			{
				PVRSRV_ERROR eError = PowerMgmtTest();
				if (psBridgePackageKM->ui32OutBufferSize == sizeof(PVRSRV_BRIDGE_RETURN))
				{
					PVRSRV_BRIDGE_RETURN* pReturn = (PVRSRV_BRIDGE_RETURN*)psBridgePackageKM->pvParamOut ;
					pReturn->eError = eError;
				}
			}
			err = 0;
			goto unlock_and_return;
	}
#endif

	/* For every call except the initial CONNECT, validate the kernel
	 * services handle and check it belongs to the calling process. */
	if(cmd != PVRSRV_BRIDGE_CONNECT_SERVICES)
	{
		PVRSRV_ERROR eError;

		eError = PVRSRVLookupHandle(KERNEL_HANDLE_BASE,
					    (IMG_PVOID *)&psPerProc,
					    psBridgePackageKM->hKernelServices,
					    PVRSRV_HANDLE_TYPE_PERPROC_DATA);
		if(eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Invalid kernel services handle (%d)",
				 __FUNCTION__, eError));
			goto unlock_and_return;
		}

		if(psPerProc->ui32PID != ui32PID)
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Process %d tried to access data "
				 "belonging to process %d", __FUNCTION__, ui32PID,
				 psPerProc->ui32PID));
			goto unlock_and_return;
		}
	}
	else
	{
		/* Connect call: create (or fetch) the per-process data. */
		psPerProc = PVRSRVPerProcessData(ui32PID);
		if(psPerProc == IMG_NULL)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRV_BridgeDispatchKM: "
				 "Couldn't create per-process data area"));
			goto unlock_and_return;
		}
	}

	psBridgePackageKM->ui32BridgeID = PVRSRV_GET_BRIDGE_ID(psBridgePackageKM->ui32BridgeID);

#if defined(PVR_SECURE_FD_EXPORT)
	/* Pre-dispatch per-file-descriptor export policy checks. */
	switch(cmd)
	{
		case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
		{
			PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);

			if(psPrivateData->hKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Can only export one MemInfo "
					 "per file descriptor", __FUNCTION__));
				err = -EINVAL;
				goto unlock_and_return;
			}
			break;
		}

		case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
		{
			PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *psMapDevMemIN =
				(PVRSRV_BRIDGE_IN_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamIn;
			PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);

			if(!psPrivateData->hKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: File descriptor has no "
					 "associated MemInfo handle", __FUNCTION__));
				err = -EINVAL;
				goto unlock_and_return;
			}

			psMapDevMemIN->hKernelMemInfo = psPrivateData->hKernelMemInfo;
			break;
		}

		default:
		{
			PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);

			if(psPrivateData->hKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_ERROR, "%s: Import/Export handle tried "
					 "to use privileged service", __FUNCTION__));
				goto unlock_and_return;
			}
			break;
		}
	}
#endif

	err = BridgedDispatchKM(psPerProc, psBridgePackageKM);
	if(err != PVRSRV_OK)
		goto unlock_and_return;

	/* Post-dispatch fixups: record export state / meminfo stamps. */
	switch(cmd)
	{
#if defined(PVR_SECURE_FD_EXPORT)
		case PVRSRV_BRIDGE_EXPORT_DEVICEMEM:
		{
			PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *psExportDeviceMemOUT =
				(PVRSRV_BRIDGE_OUT_EXPORTDEVICEMEM *)psBridgePackageKM->pvParamOut;
			PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);

			psPrivateData->hKernelMemInfo = psExportDeviceMemOUT->hMemInfo;
#if defined(SUPPORT_MEMINFO_IDS)
			psExportDeviceMemOUT->ui64Stamp = psPrivateData->ui64Stamp = ++ui64Stamp;
#endif
			break;
		}
#endif

#if defined(SUPPORT_MEMINFO_IDS)
		case PVRSRV_BRIDGE_MAP_DEV_MEMORY:
		{
			PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *psMapDeviceMemoryOUT =
				(PVRSRV_BRIDGE_OUT_MAP_DEV_MEMORY *)psBridgePackageKM->pvParamOut;
			PVRSRV_FILE_PRIVATE_DATA *psPrivateData = PRIVATE_DATA(pFile);
			psMapDeviceMemoryOUT->sDstClientMemInfo.ui64Stamp = psPrivateData->ui64Stamp;
			break;
		}

		case PVRSRV_BRIDGE_MAP_DEVICECLASS_MEMORY:
		{
			PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *psDeviceClassMemoryOUT =
				(PVRSRV_BRIDGE_OUT_MAP_DEVICECLASS_MEMORY *)psBridgePackageKM->pvParamOut;
			psDeviceClassMemoryOUT->sClientMemInfo.ui64Stamp = ++ui64Stamp;
			break;
		}
#endif

		default:
			break;
	}

unlock_and_return:
	LinuxUnLockMutex(&gPVRSRVLock);

	return err;
}
/* Map an import to the device.
 *
 * On the first device mapping of the import (device refcount 0 -> 1)
 * this reserves device-virtual space from the heap's RA, sets up page
 * tables for the range and — when bMap is set — maps the import's PMR
 * into it.  Subsequent calls only take a reference and verify the same
 * heap is being used.  All state changes are made under the device
 * import lock; the heap's import count is adjusted under the heap lock.
 */
IMG_INTERNAL
PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
				       IMG_BOOL bMap,
				       DEVMEM_IMPORT *psImport)
{
	DEVMEM_DEVICE_IMPORT *psDeviceImport;
	IMG_BOOL bStatus;
	RA_BASE_T uiAllocatedAddr;
	RA_LENGTH_T uiAllocatedSize;
	IMG_DEV_VIRTADDR sBase;
	IMG_HANDLE hReservation;
	PVRSRV_ERROR eError;

	psDeviceImport = &psImport->sDeviceImport;

	OSLockAcquire(psDeviceImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psImport,
			      psDeviceImport->ui32RefCount,
			      psDeviceImport->ui32RefCount+1);

	if (psDeviceImport->ui32RefCount++ == 0)
	{
		/* First mapping: pin the import and count it on the heap. */
		_DevmemImportStructAcquire(psImport);

		OSLockAcquire(psHeap->hLock);
		psHeap->uiImportCount++;
		OSLockRelease(psHeap->hLock);

		if (psHeap->psCtx->hBridge != psImport->hBridge)
		{
			/* The import was done with a different connection then the
			   memory context which means they are not compatible. */
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto failCheck;
		}

		/* Allocate space in the VM */
		bStatus = RA_Alloc(psHeap->psQuantizedVMRA,
				   psImport->uiSize,
				   0, /* flags: this RA doesn't use flags*/
				   psImport->uiAlign,
				   &uiAllocatedAddr,
				   &uiAllocatedSize,
				   IMG_NULL /* don't care about per-import priv data */
				   );
		if (!bStatus)
		{
			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
			goto failVMRAAlloc;
		}

		/* No reason for the allocated virtual size to be different from
		   the PMR's size */
		PVR_ASSERT(uiAllocatedSize == psImport->uiSize);

		sBase.uiAddr = uiAllocatedAddr;

		/* Setup page tables for the allocated VM space */
		eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hBridge,
						     psHeap->hDevMemServerHeap,
						     sBase,
						     uiAllocatedSize,
						     &hReservation);
		if (eError != PVRSRV_OK)
		{
			goto failReserve;
		}

		if (bMap)
		{
			DEVMEM_FLAGS_T uiMapFlags;

			uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;

			/* Actually map the PMR to allocated VM space */
			eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hBridge,
						       psHeap->hDevMemServerHeap,
						       hReservation,
						       psImport->hPMR,
						       uiMapFlags,
						       &psDeviceImport->hMapping);
			if (eError != PVRSRV_OK)
			{
				goto failMap;
			}
			psDeviceImport->bMapped = IMG_TRUE;
		}

		/* Setup device mapping specific parts of the mapping info */
		psDeviceImport->hReservation = hReservation;
		psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
		psDeviceImport->psHeap = psHeap;
	}
	else
	{
		/* Check that we've been asked to map it into the same heap
		   2nd time around */
		if (psHeap != psDeviceImport->psHeap)
		{
			eError = PVRSRV_ERROR_INVALID_HEAP;
			goto failParams;
		}
	}
	OSLockRelease(psDeviceImport->hLock);

	return PVRSRV_OK;

	/* Error unwind, strictly in reverse order of acquisition.
	 * NOTE(review): psDeviceImport->ui32RefCount incremented above is
	 * not rolled back on these paths — confirm callers treat a failed
	 * map as holding no device reference. */
failMap:
	BridgeDevmemIntUnreserveRange(psHeap->psCtx->hBridge,
				      hReservation);
failReserve:
	RA_Free(psHeap->psQuantizedVMRA,
		uiAllocatedAddr);
failVMRAAlloc:
failCheck:
	_DevmemImportStructRelease(psImport);
	OSLockAcquire(psHeap->hLock);
	psHeap->uiImportCount--;
	OSLockRelease(psHeap->hLock);
failParams:
	OSLockRelease(psDeviceImport->hLock);
	PVR_ASSERT(eError != PVRSRV_OK);

	return eError;
}
/* Create a server-side sync-operation cookie covering the given sync
 * blocks, client syncs and server syncs.
 *
 * All the arrays hang off a single allocation carved up behind the
 * SERVER_OP_COOKIE header; the caller's block/index/server-sync arrays
 * are copied in and a reference is taken on every sync block and server
 * sync so they cannot be freed while the operation is live.
 *
 * NOTE(review): paui32Flags, paui32FenceValue, paui32UpdateValue and the
 * server fence/update arrays are left zero-initialised here — presumably
 * filled in when the op is taken/completed; confirm against the callers.
 */
PVRSRV_ERROR
PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
			 SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
			 IMG_UINT32 ui32ClientSyncCount,
			 IMG_UINT32 *paui32SyncBlockIndex,
			 IMG_UINT32 *paui32Index,
			 IMG_UINT32 ui32ServerSyncCount,
			 SERVER_SYNC_PRIMITIVE **papsServerSync,
			 SERVER_OP_COOKIE **ppsServerCookie)
{
	SERVER_OP_COOKIE *psNewCookie;
	IMG_UINT32 ui32BlockAllocSize;
	IMG_UINT32 ui32ServerAllocSize;
	IMG_UINT32 ui32ClientAllocSize;
	IMG_UINT32 ui32TotalAllocSize;
	IMG_UINT32 i;
	IMG_CHAR *pcPtr;
	PVRSRV_ERROR eError;

	/* Allocate space for all the sync block list */
	ui32BlockAllocSize = ui32SyncBlockCount * (sizeof(SYNC_PRIMITIVE_BLOCK *));

	/* Allocate space for all the client sync size elements
	 * (block index, index, flags, fence value, update value per sync) */
	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));

	/* Allocate space for all the server sync size elements
	 * (pointer plus fence and update value per sync) */
	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(SERVER_SYNC_PRIMITIVE *)
			+ (2 * sizeof(IMG_UINT32)));

	ui32TotalAllocSize = sizeof(SERVER_OP_COOKIE) +
			     ui32BlockAllocSize +
			     ui32ServerAllocSize +
			     ui32ClientAllocSize;

	psNewCookie = OSAllocMem(ui32TotalAllocSize);
	pcPtr = (IMG_CHAR *) psNewCookie;

	if (!psNewCookie)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e0;
	}

	OSMemSet(psNewCookie, 0, ui32TotalAllocSize);

	/* Setup the pointers: carve the single allocation into the arrays
	 * in the same order as the size calculation above. */
	pcPtr += sizeof(SERVER_OP_COOKIE);
	psNewCookie->papsSyncPrimBlock = (SYNC_PRIMITIVE_BLOCK **) pcPtr;

	pcPtr += sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount;
	psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;

	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
	psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;

	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
	psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;

	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
	psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;

	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
	psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;

	pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
	psNewCookie->papsServerSync =(SERVER_SYNC_PRIMITIVE **) pcPtr;

	pcPtr += sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount;
	psNewCookie->paui32ServerFenceValue = (IMG_UINT32 *) pcPtr;

	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
	psNewCookie->paui32ServerUpdateValue = (IMG_UINT32 *) pcPtr;
	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;

	/* Check the pointer setup went ok */
	PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));

	psNewCookie->ui32SyncBlockCount= ui32SyncBlockCount;
	psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
	psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
	psNewCookie->bActive = IMG_FALSE;

	/* Copy all the data into our server cookie */
	OSMemCopy(psNewCookie->papsSyncPrimBlock,
		  papsSyncPrimBlock,
		  sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount);

	OSMemCopy(psNewCookie->paui32SyncBlockIndex,
		  paui32SyncBlockIndex,
		  sizeof(IMG_UINT32) * ui32ClientSyncCount);
	OSMemCopy(psNewCookie->paui32Index,
		  paui32Index,
		  sizeof(IMG_UINT32) * ui32ClientSyncCount);

	OSMemCopy(psNewCookie->papsServerSync,
		  papsServerSync,
		  sizeof(SERVER_SYNC_PRIMITIVE *) *ui32ServerSyncCount);

	/* Take a reference on all the sync blocks and server syncs so
	   they can't be freed while we're using them */
	for (i=0;i<ui32SyncBlockCount;i++)
	{
		_SyncPrimitiveBlockRef(psNewCookie->papsSyncPrimBlock[i]);
	}

	for (i=0;i<ui32ServerSyncCount;i++)
	{
		ServerSyncRef(psNewCookie->papsServerSync[i]);
	}

	*ppsServerCookie = psNewCookie;
	return PVRSRV_OK;

e0:
	return eError;
}
/* De-initialise kernel services at driver unload time (OMAP3 variant).
 *
 * Tears down in roughly the reverse order of SysInitialise: SOC timer
 * mapping, LISR/MISR, the SGX device (with clocks re-enabled for the
 * duration when APM is built in), system clocks, environment data and
 * finally the common/system globals.  Uses the gpsSysData /
 * gpsSysSpecificData globals throughout. */
PVRSRV_ERROR SysDeinitialise (SYS_DATA *psSysData)
{
	PVRSRV_ERROR eError;

	if(gpsSysData->pvSOCTimerRegisterKM)
	{
		OSUnReservePhys(gpsSysData->pvSOCTimerRegisterKM,
				4,
				PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
				gpsSysData->hSOCTimerRegisterOSMemHandle);
	}

#if defined(SYS_USING_INTERRUPTS)
	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LISR))
	{
		eError = OSUninstallDeviceLISR(psSysData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallDeviceLISR failed"));
			return eError;
		}
	}
#endif

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_MISR))
	{
		eError = OSUninstallMISR(psSysData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: OSUninstallMISR failed"));
			return eError;
		}
	}

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV))
	{
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
		PVR_ASSERT(SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS));

		/* Re-enable SGX clocks whilst the device is de-initialised. */
		eError = EnableSGXClocksWrap(gpsSysData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: EnableSGXClocks failed"));
			return eError;
		}
#endif

		eError = PVRSRVDeinitialiseDevice (gui32SGXDeviceID);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init the device"));
			return eError;
		}
	}

#if 0
	/* Runtime-PM unregistration deliberately compiled out. */
	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME))
	{
		eError = SysPMRuntimeUnregister();
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: Failed to unregister with OSPM!"));
			gpsSysData = IMG_NULL;
			return eError;
		}
	}
#endif

	/* Disable system clocks - must happen after last access to hardware */
	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS))
	{
		DisableSystemClocks(gpsSysData);
	}

	if (SYS_SPECIFIC_DATA_TEST(gpsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA))
	{
		eError = OSDeInitEnvData(gpsSysData->pvEnvSpecificData);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"SysDeinitialise: failed to de-init env structure"));
			return eError;
		}
	}

	SysDeinitialiseCommon(gpsSysData);

#if defined(NO_HARDWARE) || defined(SGX_OCP_REGS_ENABLED)
	if(gsSGXRegsCPUVAddr != IMG_NULL)
	{
#if defined(NO_HARDWARE)
		/* Contiguous-memory-backed fake register bank. */
		OSBaseFreeContigMemory(SYS_OMAP3430_SGX_REGS_SIZE, gsSGXRegsCPUVAddr, gsSGXDeviceMap.sRegsCpuPBase);
#else
#if defined(SGX_OCP_REGS_ENABLED)
		/* Real register mapping (also used for the OCP registers). */
		OSUnMapPhysToLin(gsSGXRegsCPUVAddr,
				 gsSGXDeviceMap.ui32RegsSize,
				 PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
				 IMG_NULL);

		gpvOCPRegsLinAddr = IMG_NULL;
#endif
#endif
		gsSGXRegsCPUVAddr = IMG_NULL;
		gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
	}
#endif

	gpsSysSpecificData->ui32SysSpecificData = 0;
	gpsSysSpecificData->bSGXInitComplete = IMG_FALSE;

	gpsSysData = IMG_NULL;

	return PVRSRV_OK;
}
/*
 * TLClientOpenStream
 *
 * Open a transport-layer stream by name and map its buffer into this
 * process.  On success *phSD holds a client-side stream descriptor that
 * owns the server stream handle, the export cookie, the device memory
 * import and the CPU mapping; each is released in reverse order on any
 * failure along the way.
 */
IMG_INTERNAL
PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
		IMG_PCHAR    pszName,
		IMG_UINT32   ui32Mode,
		IMG_HANDLE*  phSD)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	TL_STREAM_DESC* psStream = 0;
	DEVMEM_SERVER_EXPORTCOOKIE hServerCookie;

	PVR_ASSERT(hSrvHandle);
	PVR_ASSERT(pszName);
	PVR_ASSERT(phSD);

	*phSD = NULL;

	/* Descriptor object starts zeroed, with "no data acquired" markers. */
	psStream = OSAllocZMem(sizeof(TL_STREAM_DESC));
	if (psStream == NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		PVR_DPF((PVR_DBG_ERROR, "BridgeTLOpenStream: KM returned %d", eError));
		goto errReturn;
	}
	psStream->uiReadLen = psStream->uiReadOffset = NO_ACQUIRE;

	/* Ask the kernel server for a stream handle and a buffer export
	 * cookie so the buffer can be reached from this process. */
	eError = BridgeTLOpenStream(hSrvHandle, pszName, ui32Mode,
	                            &psStream->hServerSD, &hServerCookie);
	if (eError != PVRSRV_OK)
	{
		/* In OPEN_WAIT mode a timeout is an expected outcome: fail
		 * quietly without the error log. */
		if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
		    (eError == PVRSRV_ERROR_TIMEOUT))
		{
			goto errFreeDesc;
		}
		PVR_LOGG_IF_ERROR(eError, "BridgeTLOpenStream", errFreeDesc);
	}

	/* Convert the server export cookie into a client-usable cookie... */
	eError = DevmemMakeServerExportClientExport(hSrvHandle, hServerCookie,
	                                            &psStream->sExportCookie);
	PVR_LOGG_IF_ERROR(eError, "DevmemMakeServerExportClientExport", errCloseStream);

	/* ...import the buffer's physical memory region through it... */
	eError = DevmemImport(hSrvHandle, &psStream->sExportCookie,
	                      PVRSRV_MEMALLOCFLAG_CPU_READABLE,
	                      "TLClientCookie",
	                      &psStream->psUMmemDesc);
	PVR_LOGG_IF_ERROR(eError, "DevmemImport", errUnexport);

	/* ...and map it into this process's virtual address space. */
	eError = DevmemAcquireCpuVirtAddr(psStream->psUMmemDesc,
	                                  (IMG_PVOID *) &psStream->pBaseAddr);
	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", errFreeImport);

	/* Hand the completed descriptor back to the caller. */
	*phSD = psStream;
	return PVRSRV_OK;

	/* Unwind in reverse order of acquisition. */
errFreeImport:
	DevmemFree(psStream->psUMmemDesc);

errUnexport:
	(void) DevmemUnmakeServerExportClientExport(hSrvHandle, &psStream->sExportCookie);

errCloseStream:
	BridgeTLCloseStream(hSrvHandle, psStream->hServerSD);

errFreeDesc:
	OSFreeMem(psStream);

errReturn:
	return eError;
}
/*!
******************************************************************************

 @Function	DisableSGXClocks

 @Description	Disable SGX clocks: gate the functional clock (or drop the
		runtime-PM reference when PM_RUNTIME_SUPPORT is used), mask
		SGX interrupts, and ask the DVFS framework for the minimum
		frequency so the domain can idle at its lowest operating
		point. Idempotent: returns immediately if the clocks are
		already disabled. No-op in NO_HARDWARE builds.

 @Input		psSysData - system data holding the sys-specific data with
		the clock handles and clock-enabled flag.

 @Return	none

******************************************************************************/
IMG_VOID DisableSGXClocks(SYS_DATA *psSysData)
{
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;

	/* SGX clocks already disabled? */
	if (atomic_read(&psSysSpecData->sSGXClocksEnabled) == 0)
	{
		return;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "DisableSGXClocks: Disabling SGX Clocks"));

#if !defined(PM_RUNTIME_SUPPORT)
	/* Gate the SGX functional clock directly when runtime PM is not
	 * managing it for us. */
	clk_disable(psSysSpecData->psSGX_FCK);
#endif
	SysDisableSGXInterrupts(psSysData);

#if defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI)
	{
#if defined(PM_RUNTIME_SUPPORT)
		/* Drop our runtime-PM reference; the PM core powers the domain
		 * down once the count reaches zero. */
		int res = pm_runtime_put_sync(&gpsPVRLDMDev->dev);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: pm_runtime_put_sync failed (%d)", -res));
		}
#endif
	}
#if defined(SYS_OMAP_HAS_DVFS_FRAMEWORK)
	{
		struct gpu_platform_data *pdata;
		int res;

		pdata = (struct gpu_platform_data *)gpsPVRLDMDev->dev.platform_data;

		/*
		 * Request minimum frequency (list index 0) from DVFS layer if not already
		 * set. DVFS may report busy if early in initialization, but all other errors
		 * are considered serious. Upon any error we proceed assuming our safe frequency
		 * value to be in use as indicated by the "unknown" index
		 * (last entry of the frequency list).
		 */
		if (psSysSpecData->ui32SGXFreqListIndex != 0)
		{
			PVR_ASSERT(pdata->device_scale != IMG_NULL);
			res = pdata->device_scale(&gpsPVRLDMDev->dev,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,4,0))
			                          /* Older kernels take the target device twice. */
			                          &gpsPVRLDMDev->dev,
#endif
			                          psSysSpecData->pui32SGXFreqList[0]);
			if (res == 0)
			{
				psSysSpecData->ui32SGXFreqListIndex = 0;
			}
			else if (res == -EBUSY)
			{
				PVR_DPF((PVR_DBG_WARNING, "DisableSGXClocks: Unable to scale SGX frequency (EBUSY)"));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
			else if (res < 0)
			{
				PVR_DPF((PVR_DBG_ERROR, "DisableSGXClocks: Unable to scale SGX frequency (%d)", res));
				psSysSpecData->ui32SGXFreqListIndex = psSysSpecData->ui32SGXFreqListSize - 1;
			}
		}
	}
#endif /* defined(SYS_OMAP_HAS_DVFS_FRAMEWORK) */
#endif /* defined(LDM_PLATFORM) && !defined(PVR_DRI_DRM_NOT_PCI) */

	/* Indicate that the SGX clocks are disabled */
	atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);

#else  /* !defined(NO_HARDWARE) */
	PVR_UNREFERENCED_PARAMETER(psSysData);
#endif /* !defined(NO_HARDWARE) */
}
int MMapPMR(struct file* pFile, struct vm_area_struct* ps_vma) { PVRSRV_ERROR eError; IMG_HANDLE hSecurePMRHandle; IMG_SIZE_T uiLength; IMG_DEVMEM_OFFSET_T uiOffset; unsigned long uiPFN; IMG_HANDLE hPMRResmanHandle; PMR *psPMR; PMR_FLAGS_T ulPMRFlags; IMG_UINT32 ui32CPUCacheFlags; unsigned long ulNewFlags = 0; pgprot_t sPageProt; #if defined(SUPPORT_DRM) // INTEL_TEMP // SINCE PVR_DRM_FILE_FROM_FILE is NOT found CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile->private_data); // INTEL_TEMP // SINCE PVR_DRM_FILE_FROM_FILE is NOT found //if (ps_vma->vm_pgoff > INT_MAX) //{ // ps_vma->vm_pgoff -= ((unsigned int)INT_MAX + 1); // return MMapGEM(pFile, ps_vma); //} #else CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile); #endif /* * Both PVRSRVLookupHandle and ResManFindPrivateDataByPtr * require the bridge mutex to be held for thread safety. */ LinuxLockMutex(&gPVRSRVLock); LinuxLockMutex(&g_sMMapMutex); hSecurePMRHandle=(IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff); eError = PVRSRVLookupHandle(psConnection->psHandleBase, (IMG_HANDLE *) &hPMRResmanHandle, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); if (eError != PVRSRV_OK) { goto e0; } eError = ResManFindPrivateDataByPtr(hPMRResmanHandle, (IMG_VOID **)&psPMR); if (eError != PVRSRV_OK) { goto e0; } /* Take a reference on the PMR, make's sure that it can't be freed while it's mapped into the user process */ PMRRefPMR(psPMR); LinuxUnLockMutex(&gPVRSRVLock); eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT); if (eError != PVRSRV_OK) { goto e1; } if (((ps_vma->vm_flags & VM_WRITE) != 0) && ((ps_vma->vm_flags & VM_SHARED) == 0)) { eError = PVRSRV_ERROR_INVALID_PARAMS; goto e1; } /* we ought to call PMR_Flags() here to check the permissions against the requested mode, and possibly to set up the cache control protflags */ eError = PMR_Flags(psPMR, &ulPMRFlags); if (eError != PVRSRV_OK) { goto e1; } ulNewFlags = ps_vma->vm_flags; #if 0 /* Discard user read/write request, we will pull these 
flags from the PMR */ ulNewFlags &= ~(VM_READ | VM_WRITE); if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE) { ulNewFlags |= VM_READ; } if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) { ulNewFlags |= VM_WRITE; } #endif ps_vma->vm_flags = ulNewFlags; #if defined(__arm__) sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags)); #elif defined(__i386__) || defined(__x86_64) sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags)); #elif defined(__metag__) sPageProt = vm_get_page_prot(ulNewFlags); #else #error Please add pgprot_modify equivalent for your system #endif ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags); switch (ui32CPUCacheFlags) { case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: sPageProt = pgprot_noncached(sPageProt); break; case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE: sPageProt = pgprot_writecombine(sPageProt); break; case PVRSRV_MEMALLOCFLAG_CPU_CACHED: break; default: eError = PVRSRV_ERROR_INVALID_PARAMS; goto e1; } ps_vma->vm_page_prot = sPageProt; uiLength = ps_vma->vm_end - ps_vma->vm_start; for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT) { IMG_SIZE_T uiNumContiguousBytes; IMG_INT32 iStatus; IMG_CPU_PHYADDR sCpuPAddr; IMG_BOOL bValid; struct page *psPage = NULL; uiNumContiguousBytes = 1ULL<<PAGE_SHIFT; eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid); PVR_ASSERT(eError == PVRSRV_OK); if (eError) { goto e2; } /* Only map in pages that are valid, any that aren't will be picked up by the nopage handler which will return a zeroed page for us */ if (bValid) { uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT; PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr); PVR_ASSERT(pfn_valid(uiPFN)); psPage = pfn_to_page(uiPFN); iStatus = vm_insert_page(ps_vma, ps_vma->vm_start + uiOffset, psPage); PVR_ASSERT(iStatus == 0); if(iStatus) { // N.B. not the right error code, but, it doesn't get propagated anyway... 
:( eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto e2; } } (void)pFile; } ps_vma->vm_flags |= VM_IO; /* Don't include the mapping in core dumps */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) ps_vma->vm_flags |= VM_DONTDUMP; #else ps_vma->vm_flags |= VM_RESERVED; #endif /* * Disable mremap because our nopage handler assumes all * page requests have already been validated. */ ps_vma->vm_flags |= VM_DONTEXPAND; /* Don't allow mapping to be inherited across a process fork */ ps_vma->vm_flags |= VM_DONTCOPY; /* let us see the PMR so we can unlock it later */ ps_vma->vm_private_data = psPMR; /* Install open and close handlers for ref-counting */ ps_vma->vm_ops = &gsMMapOps; LinuxUnLockMutex(&g_sMMapMutex); return 0; /* error exit paths follow */ e2: PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error. Abort!")); PMRUnlockSysPhysAddresses(psPMR); e1: PMRUnrefPMR(psPMR); goto em1; e0: LinuxUnLockMutex(&gPVRSRVLock); em1: PVR_ASSERT(eError != PVRSRV_OK); PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError)); LinuxUnLockMutex(&g_sMMapMutex); return -ENOENT; // -EAGAIN // or what? }
/*
	RGXRegisterMemoryContext

	Register a new MMU context with the RGX device.  The very first call
	registers the kernel MMU context (just records the pointer); every
	later call allocates a firmware memory-context structure, writes the
	context's page-catalogue base address into it (plus PDUMP capture
	where enabled), and queues the bookkeeping record on the device's
	memory-context list for page-fault analysis.

	Fix over previous revision: the error paths reached before
	psServerMMUContext->psFWMemContextMemDesc was assigned used that
	field to free the firmware memory context.  psServerMMUContext comes
	from OSAllocMem (not zero-initialised) and the field is only set on
	the success path, so that read was of an uninitialised pointer.  The
	cleanup now frees the local psFWMemContextMemDesc instead.

	On success *hPrivData receives the SERVER_MMU_CONTEXT (or is left
	untouched for the kernel context); returns PVRSRV_OK or an error.
*/
PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
									  MMU_CONTEXT			*psMMUContext,
									  IMG_HANDLE			*hPrivData)
{
	PVRSRV_ERROR			eError;
	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
	DEVMEM_FLAGS_T			uiFWMemContextMemAllocFlags;
	RGXFWIF_FWMEMCONTEXT	*psFWMemContext;
	DEVMEM_MEMDESC			*psFWMemContextMemDesc;
	SERVER_MMU_CONTEXT *psServerMMUContext;

	if (psDevInfo->psKernelMMUCtx == IMG_NULL)
	{
		/*
		 * This must be the creation of the Kernel memory context. Take a copy
		 * of the MMU context for use when programming the BIF.
		 */
		psDevInfo->psKernelMMUCtx = psMMUContext;
	}
	else
	{
		psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
		if (psServerMMUContext == IMG_NULL)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto fail_alloc_server_ctx;
		}

		psServerMMUContext->psDevInfo = psDevInfo;

		/*
		 * This FW MemContext is only mapped into kernel for initialisation purposes.
		 * Otherwise this allocation is only used by the FW.
		 * Therefore the GPU cache doesn't need coherency,
		 * and write-combine is suffice on the CPU side (WC buffer will be flushed at any kick)
		 */
		uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
										PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
										PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

		/*
			Allocate device memory for the firmware memory context for the new
			application.
		*/
		PDUMPCOMMENT("Allocate RGX firmware memory context");
		/* FIXME: why cache-consistent? */
		eError = DevmemFwAllocate(psDevInfo,
								sizeof(*psFWMemContext),
								uiFWMemContextMemAllocFlags,
								"FirmwareMemoryContext",
								&psFWMemContextMemDesc);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
					eError));
			goto fail_alloc_fw_ctx;
		}

		/*
			Temporarily map the firmware memory context to the kernel.
		*/
		eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
										  (IMG_VOID **)&psFWMemContext);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
					eError));
			goto fail_acquire_cpu_addr;
		}

		/*
		 * Write the new memory context's page catalogue into the firmware memory
		 * context for the client.
		 */
		eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
					eError));
			DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
			goto fail_acquire_base_addr;
		}

		/*
		 * Set default values for the rest of the structure.
		 * -1 for the page-cat base register means "not yet assigned".
		 */
		psFWMemContext->uiPageCatBaseRegID = -1;
		psFWMemContext->uiBreakpointAddr = 0;
		psFWMemContext->uiBPHandlerAddr = 0;
		psFWMemContext->uiBreakpointCtl = 0;

#if defined(SUPPORT_GPUVIRT_VALIDATION)
		{
			IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;

			MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg);

			psFWMemContext->ui32OSid = ui32OSidReg;
		}
#endif

#if defined(PDUMP)
		{
			IMG_CHAR			aszName[PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT];
			IMG_DEVMEM_OFFSET_T uiOffset = 0;

			/*
			 * Dump the Mem context allocation
			 */
			DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);

			/*
			 * Obtain a symbolic addr of the mem context structure
			 */
			eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
												   &uiOffset,
												   aszName,
												   PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
						eError));
				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
				goto fail_pdump_cat_base_addr;
			}

			/*
			 * Dump the Page Cat tag in the mem context (symbolic address)
			 */
			eError = MMU_PDumpWritePageCatBase(psMMUContext,
											   aszName,
											   uiOffset,
											   8, /* 64-bit register write */
											   0,
											   0,
											   0);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
						eError));
				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
				goto fail_pdump_cat_base;
			}
		}
#endif

		/*
		 * Release kernel address acquired above.
		 */
		DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);

		/*
		 * Store the process information for this device memory context
		 * for use with the host page-fault analysis.
		 */
		psServerMMUContext->uiPID = OSGetCurrentProcessID();
		psServerMMUContext->psMMUContext = psMMUContext;
		psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
		/* NOTE(review): assumes OSSNPrintf returns the buffer size exactly
		 * on truncation -- if it can return more, this check should be >=.
		 * Either way the explicit terminator keeps the name safe. */
		if (OSSNPrintf(psServerMMUContext->szProcessName,
					   RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
					   "%s",
					   OSGetCurrentProcessName()) == RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
		{
			psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
		}

		OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
		dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
		OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);

		MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
		*hPrivData = psServerMMUContext;
	}

	return PVRSRV_OK;

#if defined(PDUMP)
fail_pdump_cat_base:
fail_pdump_cat_base_addr:
	MMU_ReleaseBaseAddr(IMG_NULL);
#endif
fail_acquire_base_addr:
	/* Done before jumping to the fail point as the release is done before exit */
fail_acquire_cpu_addr:
	/* Free via the local memdesc: psServerMMUContext->psFWMemContextMemDesc
	 * has not been assigned yet on these paths. */
	DevmemFwFree(psFWMemContextMemDesc);
fail_alloc_fw_ctx:
	OSFreeMem(psServerMMUContext);
fail_alloc_server_ctx:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
/*!
******************************************************************************

 @Function	ReallocHandleBlockArray

 @Description	Reallocate the handle block array to hold ui32NewCount
		handles. Growth allocates a new block array plus a
		HANDLE_IMPL_DATA array per new block and links the new
		space into the free-handle bookkeeping. Shrinking is only
		permitted when purging is enabled (or to zero, which frees
		everything). Both counts must be multiples of
		HANDLE_BLOCK_SIZE.

 @Input		psBase - Pointer to handle base structure
		ui32NewCount - The new total number of handles

 @Return	Error code or PVRSRV_OK

******************************************************************************/
static PVRSRV_ERROR ReallocHandleBlockArray(HANDLE_IMPL_BASE *psBase,
					    IMG_UINT32 ui32NewCount)
{
	HANDLE_BLOCK *psOldArray = psBase->psHandleBlockArray;
	IMG_UINT32 ui32OldCount = psBase->ui32TotalHandCount;
	HANDLE_BLOCK *psNewArray = IMG_NULL;
	PVRSRV_ERROR eError = PVRSRV_OK;
	IMG_UINT32 ui32Index;

	/* Nothing to do if the size is unchanged. */
	if (ui32NewCount == ui32OldCount)
	{
		return PVRSRV_OK;
	}

	/* Shrinking (to a non-zero size) is only allowed with purging. */
	if (ui32NewCount != 0 &&
	    !psBase->bPurgingEnabled &&
	    ui32NewCount < ui32OldCount)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	/* Both counts must be whole blocks. */
	if (((ui32OldCount % HANDLE_BLOCK_SIZE) != 0) ||
	    ((ui32NewCount % HANDLE_BLOCK_SIZE) != 0))
	{
		PVR_ASSERT((ui32OldCount % HANDLE_BLOCK_SIZE) == 0);
		PVR_ASSERT((ui32NewCount % HANDLE_BLOCK_SIZE) == 0);

		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (ui32NewCount != 0)
	{
		/* Allocate new handle array */
		psNewArray = OSAllocMem(HANDLE_BLOCK_ARRAY_SIZE(ui32NewCount) * sizeof(HANDLE_BLOCK));
		if (psNewArray == IMG_NULL)
		{
			PVR_DPF((PVR_DBG_ERROR,
				 "%s: Couldn't allocate new handle array",
				 __FUNCTION__));
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto error;
		}

		/* Copy over the surviving block headers (the per-block
		   HANDLE_IMPL_DATA arrays are shared by pointer). */
		if (ui32OldCount != 0)
		{
			OSMemCopy(psNewArray, psOldArray, HANDLE_BLOCK_ARRAY_SIZE(MIN(ui32NewCount, ui32OldCount)) * sizeof(HANDLE_BLOCK));
		}
	}

	/*
	 * If the new handle array is smaller than the old one, free
	 * unused handle data structure arrays
	 */
	for (ui32Index = ui32NewCount; ui32Index < ui32OldCount; ui32Index += HANDLE_BLOCK_SIZE)
	{
		HANDLE_BLOCK *psHandleBlock = BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psOldArray, ui32Index);

		OSFreeMem(psHandleBlock->psHandleDataArray);
	}

	/*
	 * If the new handle array is bigger than the old one, allocate
	 * new handle data structure arrays
	 */
	for (ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
	{
		/* PRQA S 0505 1 */ /* psNewArray is never NULL, see assert earlier */
		HANDLE_BLOCK *psHandleBlock = BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psNewArray, ui32Index);

		psHandleBlock->psHandleDataArray = OSAllocMem(sizeof(HANDLE_IMPL_DATA) * HANDLE_BLOCK_SIZE);
		if (psHandleBlock->psHandleDataArray != IMG_NULL)
		{
			IMG_UINT32 ui32SubIndex;

			psHandleBlock->ui32FreeHandCount = HANDLE_BLOCK_SIZE;

			/* Initialise every entry as free: no data and a zero
			   "next free plus one" (meaning "the next index"). */
			for (ui32SubIndex = 0; ui32SubIndex < HANDLE_BLOCK_SIZE; ui32SubIndex++)
			{
				HANDLE_IMPL_DATA *psHandleData = psHandleBlock->psHandleDataArray + ui32SubIndex;

				psHandleData->hHandle = INDEX_TO_HANDLE(ui32SubIndex + ui32Index);
				psHandleData->pvData = IMG_NULL;
				psHandleData->ui32NextIndexPlusOne = 0;
			}
		}
		else
		{
			/* Record the failure but keep iterating; the error
			   path below frees everything that was allocated. */
			PVR_DPF((PVR_DBG_ERROR,
				 "%s: Couldn't allocate handle structures",
				 __FUNCTION__));
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		}
	}

	if (eError != PVRSRV_OK)
	{
		goto error;
	}

#if defined(DEBUG_MAX_HANDLE_COUNT)
	/* Force handle failure to test error exit code */
	if (ui32NewCount > DEBUG_MAX_HANDLE_COUNT)
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "%s: Max handle count (%u) reached",
			 __FUNCTION__, DEBUG_MAX_HANDLE_COUNT));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto error;
	}
#endif /* defined(DEBUG_MAX_HANDLE_COUNT) */

	if (psOldArray != IMG_NULL)
	{
		/* Free old handle array */
		OSFreeMem(psOldArray);
	}

	/* Commit the new array and update the free-handle accounting. */
	psBase->psHandleBlockArray = psNewArray;
	psBase->ui32TotalHandCount = ui32NewCount;

	if (ui32NewCount > ui32OldCount)
	{
		/* Check for wraparound */
		PVR_ASSERT(psBase->ui32TotalFreeHandCount + (ui32NewCount - ui32OldCount) > psBase->ui32TotalFreeHandCount);

		/* PRQA S 3382 1 */ /* ui32NewCount always > ui32OldCount */
		psBase->ui32TotalFreeHandCount += (ui32NewCount - ui32OldCount);

		/*
		 * If purging is enabled, there is no free handle list
		 * management, but as an optimization, when allocating
		 * new handles, we use ui32FirstFreeIndex to point to
		 * the first handle in a newly allocated block.
		 */
		if (psBase->ui32FirstFreeIndex == 0)
		{
			/* No free handles existed: the new space starts the
			   free list (or seeds the purge-mode search hint). */
			PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne == 0);

			psBase->ui32FirstFreeIndex = ui32OldCount;
		}
		else
		{
			if (!psBase->bPurgingEnabled)
			{
				/* Append the new space to the tail of the
				   existing free-handle linked list. */
				PVR_ASSERT(psBase->ui32LastFreeIndexPlusOne != 0);
				PVR_ASSERT(INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne == 0);

				INDEX_TO_HANDLE_DATA(psBase, psBase->ui32LastFreeIndexPlusOne - 1)->ui32NextIndexPlusOne = ui32OldCount + 1;
			}
		}

		if (!psBase->bPurgingEnabled)
		{
			psBase->ui32LastFreeIndexPlusOne = ui32NewCount;
		}
	}
	else
	{
		if (ui32NewCount == 0)
		{
			/* Everything was freed: reset all bookkeeping. */
			psBase->ui32TotalFreeHandCount = 0;
			psBase->ui32FirstFreeIndex = 0;
			psBase->ui32LastFreeIndexPlusOne = 0;
		}
		else
		{
			/* Shrink: only legal with purging enabled, and only
			   free (unallocated) space may be dropped. */
			PVR_ASSERT(psBase->bPurgingEnabled);
			PVR_ASSERT(psBase->ui32FirstFreeIndex <= ui32NewCount);
			PVR_ASSERT(psBase->ui32TotalFreeHandCount - (ui32OldCount - ui32NewCount) < psBase->ui32TotalFreeHandCount);

			/* PRQA S 3382 1 */ /* ui32OldCount always >= ui32NewCount */
			psBase->ui32TotalFreeHandCount -= (ui32OldCount - ui32NewCount);
		}
	}

	PVR_ASSERT(psBase->ui32FirstFreeIndex <= psBase->ui32TotalHandCount);

	return PVRSRV_OK;

error:
	PVR_ASSERT(eError != PVRSRV_OK);

	if (psNewArray != IMG_NULL)
	{
		/* Free any new handle structures that were allocated */
		for (ui32Index = ui32OldCount; ui32Index < ui32NewCount; ui32Index += HANDLE_BLOCK_SIZE)
		{
			HANDLE_BLOCK *psHandleBlock = BLOCK_ARRAY_AND_INDEX_TO_HANDLE_BLOCK(psNewArray, ui32Index);
			if (psHandleBlock->psHandleDataArray != IMG_NULL)
			{
				OSFreeMem(psHandleBlock->psHandleDataArray);
			}
		}

		/* Free new handle array */
		OSFreeMem(psNewArray);
	}

	return eError;
}