/*
 * SGXResetSleep
 *
 * Delay helper used during SGX core reset sequencing.
 *
 * psDevInfo      - device info; ui32CoreClockSpeed is read to scale the wait.
 * ui32PDUMPFlags - flags propagated to the PDump capture stream.
 * bPDump         - when IMG_TRUE, emit an IDL and a flushing register read
 *                  into the PDump script.
 *
 * The wait is 100 * 1000000 / core-clock-Hz microseconds, i.e. roughly the
 * duration of 100 core clock cycles (assuming ui32CoreClockSpeed is in Hz --
 * TODO confirm against the platform config).
 */
static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDUMPFlags, IMG_BOOL bPDump)
{
#if defined(PDUMP) || defined(EMULATOR)
	/* Register read back after reset: master bank on multi-core parts. */
	IMG_UINT32 ui32ReadRegister;
#if defined(SGX_FEATURE_MP)
	ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET;
#else
	ui32ReadRegister = EUR_CR_SOFT_RESET;
#endif
#endif

#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif

	/* Busy-wait scaled to the core clock (see header comment). */
	OSWaitus(100 * 1000000 / psDevInfo->ui32CoreClockSpeed);
	if (bPDump)
	{
		PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
#if defined(PDUMP)
		PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n");
		PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags);
#endif
	}

#if defined(EMULATOR)
	/* Emulator: read back to make sure the preceding writes have landed. */
	OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister);
#endif
}
static int sec_gpu_clock_enable(void) { int err = 0; /* adonis must be set parent function after runtime pm resume */ err = gpu_clock_set_parent(); if (err) { return err; } /* if setting wakeup lock clock, resume clock using that*/ /* if different with current clock and default cleck, need to set clock*/ if (gpu_clock_get() != sec_gpu_setting_clock) gpu_clock_set(sec_gpu_setting_clock); if (gpu_voltage_get() != sec_gpu_setting_voltage) gpu_voltage_set(sec_gpu_setting_voltage); if (sec_wakeup_lock_state) { if (gpu_voltage_get() < WAKEUP_LOCK_VOLTAGE + gpu_voltage_marin) gpu_voltage_set(WAKEUP_LOCK_VOLTAGE + gpu_voltage_marin); if (gpu_clock_get() < WAKEUP_LOCK_CLOCK) gpu_clock_set(WAKEUP_LOCK_CLOCK); } err = gpu_clock_enable(); if (err) { return err; } /* wait for more than 10 clocks to proper reset SGX core */ OSWaitus(1); return err; }
/*
 * Init_IRQ_CTRL - program the emulator FPGA's interrupt control register.
 *
 * Temporarily maps the ICR register bank, enables the Rogue IRQ with
 * active-low sense, flushes the write with a read-back, logs the FPGA
 * image revision, and unmaps the registers again.
 */
static IMG_VOID Init_IRQ_CTRL(PLAT_DATA *psPlatData)
{
	IMG_CPU_PHYADDR sPhysBase;
	IMG_VOID *pvRegs;
	IMG_UINT32 ui32IrqCtrl;

	/* Map the ICR register bank into the CPU's address space. */
	sPhysBase.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psPlatData->uiICRCpuPAddr);
	pvRegs = OSMapPhysToLin(sPhysBase, psPlatData->uiICRSize, 0);

	/* Enable the Rogue interrupt and select active-low sense. */
	ui32IrqCtrl = OSReadHWReg32(pvRegs, EMULATOR_RGX_ICR_REG_IRQ_CTRL);
	ui32IrqCtrl = (ui32IrqCtrl & ~EMULATOR_RGX_ICR_REG_IRQ_CTRL_IRQ_HINLO)
	              | EMULATOR_RGX_ICR_REG_IRQ_CTRL_IRQ_EN;
	OSWriteHWReg32(pvRegs, EMULATOR_RGX_ICR_REG_IRQ_CTRL, ui32IrqCtrl);

	/* Read back to push the write out, then give the FPGA a moment. */
	(void) OSReadHWReg32(pvRegs, EMULATOR_RGX_ICR_REG_IRQ_CTRL);
	OSWaitus(10);

	PVR_TRACE(("Emulator FPGA image version (ICR_REG_CORE_REVISION): 0x%08x",
	           OSReadHWReg32(pvRegs, EMULATOR_RGX_ICR_REG_CORE_REVISION)));

	/* Done with the ICR bank; drop the mapping. */
	OSUnMapPhysToLin(pvRegs, psPlatData->uiICRSize, 0);
}
/*
 * PVRSRVPowerLock - acquire the global power-state change lock.
 *
 * ui32CallerID      - identifies the caller; ISR_ID callers never spin.
 * bSystemPowerEvent - IMG_TRUE when the caller is performing a system
 *                     power event and may run while unpowered.
 *
 * Spins (1us per try, bounded attempt count) until the resource lock is
 * taken. On success, if this is not a system power event and the system
 * is not powered, the lock is released again and PVRSRV_ERROR_RETRY is
 * returned so the caller backs off.
 */
IMG_EXPORT PVRSRV_ERROR PVRSRVPowerLock(IMG_UINT32 ui32CallerID, IMG_BOOL bSystemPowerEvent)
{
	PVRSRV_ERROR eError;
	SYS_DATA *psSysData;
	IMG_UINT32 ui32Timeout = 1000000;

#if defined(SUPPORT_LMA)
	/* Local-memory systems are given a much larger attempt budget. */
	ui32Timeout *= 60;
#endif

	SysAcquireData(&psSysData);

#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
	eError = SysPowerLockWrap(psSysData);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}
#endif

	/* Spin on the power-state resource; ISR callers bail immediately. */
	for (;;)
	{
		eError = OSLockResource(&psSysData->sPowerStateChangeResource, ui32CallerID);
		if (eError == PVRSRV_OK)
		{
			break;
		}
		if (ui32CallerID == ISR_ID)
		{
			/* An ISR must never busy-wait here. */
			eError = PVRSRV_ERROR_RETRY;
			break;
		}
		OSWaitus(1);
		if (--ui32Timeout == 0)
		{
			break;
		}
	}

#if defined(SYS_CUSTOM_POWERLOCK_WRAP)
	if (eError != PVRSRV_OK)
	{
		SysPowerLockUnwrap(psSysData);
	}
#endif

	if ((eError == PVRSRV_OK) &&
	    !bSystemPowerEvent &&
	    !_IsSystemStatePowered(psSysData->eCurrentPowerState))
	{
		/* System is powered down: release the lock and ask for a retry. */
		PVRSRVPowerUnlock(ui32CallerID);
		eError = PVRSRV_ERROR_RETRY;
	}

	return eError;
}
/***********************************************************************************
 Function Name  : PVRSRVGetCPUFreq
 Inputs         : None
 Outputs        : None
 Returns        : CPU timer frequency
 Description    : Measures the CPU timer frequency by sampling the timer
                  before and after a one-second busy wait; the tick delta
                  over one second is the frequency in Hz.
************************************************************************************/
static IMG_UINT32 PVRSRVGetCPUFreq(IMG_VOID)
{
	IMG_UINT32 ui32Start, ui32End;

	ui32Start = PVRSRVTimeNow();
	OSWaitus(1000000);	/* busy-wait one second */
	ui32End = PVRSRVTimeNow();

	PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetCPUFreq: timer frequency = %d Hz", ui32End - ui32Start));

	return (ui32End - ui32Start);
}
/*
 * SGXResetSleep - delay helper used during SGX core reset sequencing.
 *
 * Waits 1000 * 1000000 / core-clock microseconds (roughly 1000 core
 * clock cycles, assuming ui32CoreClockSpeed is in Hz -- TODO confirm),
 * then optionally records an IDL and a soft-reset register read in the
 * PDump stream.
 */
static void SGXResetSleep(struct PVRSRV_SGXDEV_INFO *psDevInfo,
			  u32 ui32PDUMPFlags, IMG_BOOL bPDump)
{
#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif

	/* Busy-wait scaled to the core clock (see header comment). */
	OSWaitus(1000 * 1000000 / psDevInfo->ui32CoreClockSpeed);

	if (!bPDump)
		return;

	PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
#if defined(PDUMP)
	PDumpRegRead(EUR_CR_SOFT_RESET, ui32PDUMPFlags);
#endif
}
/*
 * PVRSRVRGXFlushComputeDataKM - flush the SLC for a compute context.
 *
 * Builds an SLC flush (no invalidate) command scoped to the given
 * compute context's firmware context, schedules it on the GP data
 * master (retrying while the firmware queue is busy), then blocks until
 * the firmware signals completion.
 *
 * Returns PVRSRV_OK on success, otherwise the scheduling/wait error.
 */
IMG_EXPORT PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
{
	PVRSRV_ERROR eError = PVRSRV_OK;
	RGXFWIF_KCCB_CMD sCmd;

#if defined(PDUMP)
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
#endif

	/* SLC flush (not invalidate), targeted at this context's CDM work. */
	sCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
	sCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
	sCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
	sCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
	sCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);

	/* Keep retrying while the kernel CCB is full. */
	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
		                            RGXFWIF_DM_GP,
		                            &sCmd,
		                            sizeof(sCmd),
		                            IMG_TRUE);
		if (eError != PVRSRV_ERROR_RETRY)
		{
			break;
		}
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
		return eError;
	}

	/* Wait for the SLC flush to complete */
	eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
	                        RGXFWIF_DM_GP,
	                        psComputeContext->psSync,
	                        IMG_TRUE);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Compute flush aborted with error (%u)", eError));
	}

	return eError;
}
/*
 * PollForValueKM - poll a memory-mapped location until it matches.
 *
 * Reads *pui32LinMemAddr, masks it with ui32Mask, and compares against
 * ui32Value; waits ui32Waitus microseconds between attempts, for a total
 * budget of ui32Tries * ui32Waitus microseconds.
 *
 * Returns PVRSRV_OK on a match, PVRSRV_ERROR_GENERIC on timeout.
 */
enum PVRSRV_ERROR PollForValueKM(u32 __iomem *pui32LinMemAddr,
				 u32 ui32Value, u32 ui32Mask,
				 u32 ui32Waitus, u32 ui32Tries)
{
	u32 ui32Budget = ui32Tries * ui32Waitus;

	LOOP_UNTIL_TIMEOUT(ui32Budget) {
		if ((readl(pui32LinMemAddr) & ui32Mask) == ui32Value)
			return PVRSRV_OK;
		OSWaitus(ui32Waitus);
	}
	END_LOOP_UNTIL_TIMEOUT();

	return PVRSRV_ERROR_GENERIC;
}
/*
 * PollForValueKM - poll a memory location until (value & mask) matches.
 *
 * pui32LinMemAddr  - location to poll.
 * ui32Value        - expected masked value.
 * ui32Mask         - mask applied before comparing.
 * ui32Timeoutus    - total polling budget in microseconds.
 * ui32PollPeriodus - delay between polls in microseconds.
 * bAllowPreemption - IMG_TRUE to sleep (millisecond granularity) instead
 *                    of busy-waiting between polls.
 *
 * Returns PVRSRV_OK on a match, PVRSRV_ERROR_TIMEOUT otherwise (logging
 * the last value observed).
 */
IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
                                                     IMG_UINT32 ui32Value,
                                                     IMG_UINT32 ui32Mask,
                                                     IMG_UINT32 ui32Timeoutus,
                                                     IMG_UINT32 ui32PollPeriodus,
                                                     IMG_BOOL bAllowPreemption)
{
	IMG_UINT32 ui32LastValue = 0xFFFFFFFFU;

	if (bAllowPreemption)
	{
		/* OSSleepms granularity is 1ms, so the period must be >= 1000us. */
		PVR_ASSERT(ui32PollPeriodus >= 1000);
	}

	LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
	{
		ui32LastValue = *pui32LinMemAddr & ui32Mask;
		if (ui32LastValue == ui32Value)
		{
			return PVRSRV_OK;
		}

		if (bAllowPreemption)
		{
			OSSleepms(ui32PollPeriodus / 1000);
		}
		else
		{
			OSWaitus(ui32PollPeriodus);
		}
	} END_LOOP_UNTIL_TIMEOUT();

	PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", ui32Value, ui32LastValue, ui32Mask));

	return PVRSRV_ERROR_TIMEOUT;
}
/*
 * PollForInterruptKM - wait for the ISR to latch an event status bit.
 *
 * Polls the gui32EventStatusServicesByISR global (written by the ISR)
 * until (status & ui32Mask) == ui32Value, waiting ui32Waitus between
 * attempts, within a budget of ui32Tries * ui32Waitus microseconds.
 * On a match the status word is cleared (event consumed).
 *
 * Returns PVRSRV_OK on a match, PVRSRV_ERROR_GENERIC on timeout.
 */
PVRSRV_ERROR PollForInterruptKM (IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
                                 IMG_UINT32 ui32Waitus, IMG_UINT32 ui32Tries)
{
	IMG_UINT32 ui32Budget = ui32Tries * ui32Waitus;

	LOOP_UNTIL_TIMEOUT(ui32Budget)
	{
		if ((gui32EventStatusServicesByISR & ui32Mask) == ui32Value)
		{
			gui32EventStatusServicesByISR = 0;	/* consume the event */
			return PVRSRV_OK;
		}
		OSWaitus(ui32Waitus);
	} END_LOOP_UNTIL_TIMEOUT();

	return PVRSRV_ERROR_GENERIC;
}
/*
 * PollForValueKM - poll a memory location until (value & mask) matches.
 *
 * Waits ui32Waitus microseconds between attempts, within a total budget
 * of ui32Tries * ui32Waitus microseconds.
 *
 * Returns PVRSRV_OK on a match, PVRSRV_ERROR_GENERIC on timeout.
 */
IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
                                                     IMG_UINT32 ui32Value,
                                                     IMG_UINT32 ui32Mask,
                                                     IMG_UINT32 ui32Waitus,
                                                     IMG_UINT32 ui32Tries)
{
	IMG_UINT32 ui32Budget = ui32Tries * ui32Waitus;

	LOOP_UNTIL_TIMEOUT(ui32Budget)
	{
		if ((*pui32LinMemAddr & ui32Mask) == ui32Value)
		{
			return PVRSRV_OK;
		}
		OSWaitus(ui32Waitus);
	} END_LOOP_UNTIL_TIMEOUT();

	return PVRSRV_ERROR_GENERIC;
}
/* * PVRSRVSubmitTQ3DKickKM */ IMG_EXPORT PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT32 ui32PrepareCount, IMG_UINT32 *paui32ClientFenceCount, PRGXFWIF_UFO_ADDR **papauiClientFenceUFOAddress, IMG_UINT32 **papaui32ClientFenceValue, IMG_UINT32 *paui32ClientUpdateCount, PRGXFWIF_UFO_ADDR **papauiClientUpdateUFOAddress, IMG_UINT32 **papaui32ClientUpdateValue, IMG_UINT32 *paui32ServerSyncCount, IMG_UINT32 **papaui32ServerSyncFlags, SERVER_SYNC_PRIMITIVE ***papapsServerSyncs, IMG_UINT32 ui32NumFenceFDs, IMG_INT32 *paui32FenceFDs, IMG_UINT32 *paui32FWCommandSize, IMG_UINT8 **papaui8FWCommand, IMG_UINT32 *pui32TQPrepareFlags) { PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; #if defined(WDDM) RGX_CCB_CMD_HELPER_DATA as3DCmdHelper[TQ_MAX_PREPARES_PER_SUBMIT]; RGX_CCB_CMD_HELPER_DATA as2DCmdHelper[TQ_MAX_PREPARES_PER_SUBMIT]; #endif RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper; RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; IMG_UINT32 ui323DCmdCount = 0; IMG_UINT32 ui322DCmdCount = 0; IMG_BOOL bKick2D = IMG_FALSE; IMG_BOOL bKick3D = IMG_FALSE; IMG_BOOL bPDumpContinuous = IMG_FALSE; IMG_UINT32 i; IMG_UINT32 ui32IntClientFenceCount = 0; PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = IMG_NULL; IMG_UINT32 *paui32IntFenceValue = IMG_NULL; IMG_UINT32 ui32IntClientUpdateCount = 0; PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = IMG_NULL; IMG_UINT32 *paui32IntUpdateValue = IMG_NULL; PVRSRV_ERROR eError; PVRSRV_ERROR eError2; #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) IMG_BOOL bSyncsMerged = IMG_FALSE; #endif if (ui32PrepareCount == 0) { return PVRSRV_ERROR_INVALID_PARAMS; } if (ui32NumFenceFDs != 0) { #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) /* Fence FD's are only valid in the 3D case with no batching */ if ((ui32PrepareCount !=1) && (!TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[0], 3D))) { return PVRSRV_ERROR_INVALID_PARAMS; } #else /* We only support Fence FD's if built with PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC */ return 
PVRSRV_ERROR_INVALID_PARAMS; #endif } #if defined(WDDM) pas3DCmdHelper = &as3DCmdHelper; pas2DCmdHelper = &as2DCmdHelper; #else /* We can't allocate the required amount of stack space on all consumer architectures */ pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount); if (pas3DCmdHelper == IMG_NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto fail_alloc3dhelper; } pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount); if (pas2DCmdHelper == IMG_NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto fail_alloc2dhelper; } #endif /* Ensure we do the right thing for server syncs which cross call bounderies */ for (i=0;i<ui32PrepareCount;i++) { IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START; IMG_BOOL bHaveEndPrepare = IMG_FALSE; if (bHaveStartPrepare) { IMG_UINT32 k; /* We've at the start of a transfer operation (which might be made up of multiple HW operations) so check if we also have then end of the transfer operation in the batch */ for (k=i;k<ui32PrepareCount;k++) { if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END) { bHaveEndPrepare = IMG_TRUE; break; } } if (!bHaveEndPrepare) { /* We don't have the complete command passed in this call so drop the update request. When we get called again with the last HW command in this transfer operation we'll do the update at that point. 
*/ for (k=0;k<paui32ServerSyncCount[i];k++) { papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE; } } } } /* Init the command helper commands for all the prepares */ for (i=0;i<ui32PrepareCount;i++) { RGX_CLIENT_CCB *psClientCCB; RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx; IMG_CHAR *pszCommandName; RGX_CCB_CMD_HELPER_DATA *psCmdHelper; RGXFWIF_CCB_CMD_TYPE eType; if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D)) { psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext; psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); pszCommandName = "TQ-3D"; psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++]; eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D; } else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D)) { psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext; psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); pszCommandName = "TQ-2D"; psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++]; eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D; } else { eError = PVRSRV_ERROR_INVALID_PARAMS; goto fail_cmdtype; } if (i == 0) { bPDumpContinuous = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS); PDUMPCOMMENTWITHFLAGS((bPDumpContinuous) ? 
PDUMP_FLAGS_CONTINUOUS : 0, "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr); } else { IMG_BOOL bNewPDumpContinuous = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS); if (bNewPDumpContinuous != bPDumpContinuous) { eError = PVRSRV_ERROR_INVALID_PARAMS; PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __FUNCTION__)); goto fail_pdumpcheck; } } #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) if (ui32NumFenceFDs) { IMG_UINT32 ui32NumFenceSyncs; PRGXFWIF_UFO_ADDR *puiFenceFWAddrs; IMG_UINT32 *pui32FenceValues; IMG_UINT32 ui32NumUpdateSyncs; PRGXFWIF_UFO_ADDR *puiUpdateFWAddrs; IMG_UINT32 *pui32UpdateValues; /* FIXME: We can't be taking the server sync operations here as we have no way to undo them should the acquire fail. If client/local syncs where used here would that remove the issue? */ eError = PVRFDSyncQueryFencesKM(ui32NumFenceFDs, paui32FenceFDs, IMG_TRUE, &ui32NumFenceSyncs, &puiFenceFWAddrs, &pui32FenceValues, &ui32NumUpdateSyncs, &puiUpdateFWAddrs, &pui32UpdateValues); if (eError != PVRSRV_OK) { goto fail_fdsync; } /* Merge the Android syncs and the client syncs together */ ui32IntClientFenceCount = paui32ClientFenceCount[i] + ui32NumFenceSyncs; pauiIntFenceUFOAddress = OSAllocMem(sizeof(*pauiIntFenceUFOAddress)* ui32IntClientFenceCount); if (pauiIntFenceUFOAddress == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); goto fail_fenceUFOarray; } paui32IntFenceValue = OSAllocMem(sizeof(*paui32IntFenceValue)* ui32IntClientFenceCount); if (paui32IntFenceValue == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); OSFreeMem(pauiIntFenceUFOAddress); 
goto fail_fencevaluearray; } ui32IntClientUpdateCount = paui32ClientUpdateCount[i] + ui32NumUpdateSyncs; pauiIntUpdateUFOAddress = OSAllocMem(sizeof(*pauiIntUpdateUFOAddress)* ui32IntClientUpdateCount); if (pauiIntUpdateUFOAddress == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); goto fail_updateUFOarray; } paui32IntUpdateValue = OSAllocMem(sizeof(*paui32IntUpdateValue)* ui32IntClientUpdateCount); if (paui32IntUpdateValue == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); OSFreeMem(pauiIntUpdateUFOAddress); goto fail_updatevaluearray; } SYNC_MERGE_CLIENT_FENCES(ui32IntClientFenceCount, pauiIntFenceUFOAddress, paui32IntFenceValue, ui32NumFenceSyncs, puiFenceFWAddrs, pui32FenceValues, paui32ClientFenceCount[i], papauiClientFenceUFOAddress[i], papaui32ClientFenceValue[i]); SYNC_MERGE_CLIENT_UPDATES(ui32IntClientUpdateCount, pauiIntUpdateUFOAddress, paui32IntUpdateValue, ui32NumUpdateSyncs, puiUpdateFWAddrs, pui32UpdateValues, paui32ClientUpdateCount[i], papauiClientUpdateUFOAddress[i], papaui32ClientUpdateValue[i]); if (ui32NumFenceSyncs || ui32NumUpdateSyncs) { PDUMPCOMMENT("(TQ) Android native fences in use: %u fence syncs, %u update syncs", ui32NumFenceSyncs, ui32NumUpdateSyncs); } /* Free the data created by PVRFDSyncQueryFencesKM as it has now been merged into *IntClient* */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); bSyncsMerged = IMG_TRUE; } else #endif /* PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC */ { /* No client sync merging so just copy across the pointers */ ui32IntClientFenceCount = 
paui32ClientFenceCount[i]; pauiIntFenceUFOAddress = papauiClientFenceUFOAddress[i]; paui32IntFenceValue = papaui32ClientFenceValue[i]; ui32IntClientUpdateCount = paui32ClientUpdateCount[i]; pauiIntUpdateUFOAddress = papauiClientUpdateUFOAddress[i]; paui32IntUpdateValue = papaui32ClientUpdateValue[i]; } /* Create the command helper data for this command */ eError = RGXCmdHelperInitCmdCCB(psClientCCB, ui32IntClientFenceCount, pauiIntFenceUFOAddress, paui32IntFenceValue, ui32IntClientUpdateCount, pauiIntUpdateUFOAddress, paui32IntUpdateValue, paui32ServerSyncCount[i], papaui32ServerSyncFlags[i], papapsServerSyncs[i], paui32FWCommandSize[i], papaui8FWCommand[i], eType, bPDumpContinuous, pszCommandName, psCmdHelper); if (eError != PVRSRV_OK) { goto fail_initcmd; } } /* Acquire space for all the commands in one go */ if (ui323DCmdCount) { eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, &pas3DCmdHelper[0], &bKick3D); if (eError != PVRSRV_OK) { if (bKick3D) { ui323DCmdCount = 0; ui322DCmdCount = 0; } else { goto fail_3dcmdacquire; } } } if (ui322DCmdCount) { eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount, &pas2DCmdHelper[0], &bKick2D); if (eError != PVRSRV_OK) { if (bKick2D || bKick3D) { ui323DCmdCount = 0; ui322DCmdCount = 0; } else { goto fail_2dcmdacquire; } } } /* We should acquire the kernel CCB(s) space here as the schedule could fail and we would have to roll back all the syncs */ /* Only do the command helper release (which takes the server sync operations if the acquire succeeded */ if (ui323DCmdCount) { RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, &pas3DCmdHelper[0], "TQ_3D", FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr); } if (ui322DCmdCount) { RGXCmdHelperReleaseCmdCCB(ui322DCmdCount, &pas2DCmdHelper[0], "TQ_2D", FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr); } /* Even if we failed to acquire the client CCB space we might still need to kick the HW to process a padding packet 
to release space for us next time round */ if (bKick3D) { RGXFWIF_KCCB_CMD s3DKCCBCmd; /* Construct the kernel 3D CCB command. */ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext); s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext)); s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) { eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, RGXFWIF_DM_3D, &s3DKCCBCmd, sizeof(s3DKCCBCmd), bPDumpContinuous); if (eError2 != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); } END_LOOP_UNTIL_TIMEOUT(); } if (bKick2D) { RGXFWIF_KCCB_CMD s2DKCCBCmd; /* Construct the kernel 3D CCB command. */ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext); s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext)); s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) { eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, RGXFWIF_DM_2D, &s2DKCCBCmd, sizeof(s2DKCCBCmd), bPDumpContinuous); if (eError2 != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); } END_LOOP_UNTIL_TIMEOUT(); } /* * Now check eError (which may have returned an error from our earlier calls * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first * so we check it now... 
*/ if (eError != PVRSRV_OK ) { goto fail_2dcmdacquire; } #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) /* Free the merged sync memory if required */ if (bSyncsMerged) { OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); OSFreeMem(pauiIntUpdateUFOAddress); OSFreeMem(paui32IntUpdateValue); } #if defined(NO_HARDWARE) for (i = 0; i < ui32NumFenceFDs; i++) { eError = PVRFDSyncNoHwUpdateFenceKM(paui32FenceFDs[i]); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed nohw update on fence fd=%d (%s)", __func__, paui32FenceFDs[i], PVRSRVGetErrorStringKM(eError))); } } #endif #endif #if !defined(WDDM) OSFreeMem(pas2DCmdHelper); OSFreeMem(pas3DCmdHelper); #endif return PVRSRV_OK; /* No resources are created in this function so there is nothing to free unless we had to merge syncs. If we fail after the client CCB acquire there is still nothing to do as only the client CCB release will modify the client CCB */ fail_2dcmdacquire: fail_3dcmdacquire: fail_initcmd: #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) if (bSyncsMerged) { OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); OSFreeMem(pauiIntUpdateUFOAddress); OSFreeMem(paui32IntUpdateValue); } fail_updatevaluearray: fail_updateUFOarray: fail_fencevaluearray: fail_fenceUFOarray: fail_fdsync: #endif fail_pdumpcheck: fail_cmdtype: PVR_ASSERT(eError != PVRSRV_OK); #if !defined(WDDM) OSFreeMem(pas2DCmdHelper); fail_alloc2dhelper: OSFreeMem(pas3DCmdHelper); fail_alloc3dhelper: #endif return eError; }
/*
 * PVRSRVRGXKickCDMKM - submit a compute (CDM) command to the firmware.
 *
 * Validates that every server sync at least fences, stages the client
 * CCB command via the command helper, acquires/releases CCB space
 * (taking server sync operations), and kicks the CDM data master via
 * the kernel CCB, retrying while the firmware queue is busy.
 *
 * Fix in this revision: the schedule-failure diagnostic tested eError2
 * but printed eError — it now prints the value it tested (eError2).
 *
 * Returns PVRSRV_OK on success; otherwise the first error from command
 * creation/acquire. A schedule failure is logged but, as before, does
 * not by itself override eError.
 */
IMG_EXPORT PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
                                           IMG_UINT32 ui32ClientFenceCount,
                                           PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress,
                                           IMG_UINT32 *paui32ClientFenceValue,
                                           IMG_UINT32 ui32ClientUpdateCount,
                                           PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress,
                                           IMG_UINT32 *paui32ClientUpdateValue,
                                           IMG_UINT32 ui32ServerSyncPrims,
                                           IMG_UINT32 *paui32ServerSyncFlags,
                                           SERVER_SYNC_PRIMITIVE **pasServerSyncs,
                                           IMG_UINT32 ui32CmdSize,
                                           IMG_PBYTE pui8DMCmd,
                                           IMG_BOOL bPDumpContinuous)
{
	RGXFWIF_KCCB_CMD sCmpKCCBCmd;
	RGX_CCB_CMD_HELPER_DATA sCmdHelperData;
	IMG_BOOL bKickRequired;
	PVRSRV_ERROR eError;
	PVRSRV_ERROR eError2;
	IMG_UINT32 i;

	/* Sanity check the server fences */
	for (i=0;i<ui32ServerSyncPrims;i++)
	{
		if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
		{
			PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on SH) must fence", __FUNCTION__));
			return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
		}
	}

	eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext),
									ui32ClientFenceCount,
									pauiClientFenceUFOAddress,
									paui32ClientFenceValue,
									ui32ClientUpdateCount,
									pauiClientUpdateUFOAddress,
									paui32ClientUpdateValue,
									ui32ServerSyncPrims,
									paui32ServerSyncFlags,
									pasServerSyncs,
									ui32CmdSize,
									pui8DMCmd,
									RGXFWIF_CCB_CMD_TYPE_CDM,
									bPDumpContinuous,
									"Compute",
									&sCmdHelperData);
	if (eError != PVRSRV_OK)
	{
		goto fail_cmdinit;
	}

	eError = RGXCmdHelperAcquireCmdCCB(1, &sCmdHelperData, &bKickRequired);
	if ((eError != PVRSRV_OK) && (!bKickRequired))
	{
		/* Only bail if no new data was submitted into the client CCB, we
		   might have already submitted a padding packet which we should flush
		   through the FW. */
		PVR_DPF((PVR_DBG_ERROR, "RGXKickCDM: Failed to create client CCB command"));
		goto fail_cmdaquire;
	}

	/* We should reserved space in the kernel CCB here and fill in the
	   command directly. This is so if there isn't space in the kernel CCB we
	   can return with retry back to services client before we take any
	   operations */

	/* We might only be kicking for flush out a padding packet so only submit
	   the command if the create was successful */
	if (eError == PVRSRV_OK)
	{
		/* All the required resources are ready at this point, we can't fail
		   so take the required server sync operations and commit all the
		   resources */
		RGXCmdHelperReleaseCmdCCB(1, &sCmdHelperData, "CDM",
								  FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
	}

	/* Construct the kernel compute CCB command. */
	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
	sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;

	/*
	 * Submit the compute command to the firmware.
	 */
	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
									 RGXFWIF_DM_CDM,
									 &sCmpKCCBCmd,
									 sizeof(sCmpKCCBCmd),
									 bPDumpContinuous);
		if (eError2 != PVRSRV_ERROR_RETRY)
		{
			break;
		}
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	if (eError2 != PVRSRV_OK)
	{
		/* FIX: report the schedule error (eError2), not eError. */
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickCDMKM failed to schedule kernel CCB command. (0x%x)", eError2));
	}

	/*
	 * Now check eError (which may have returned an error from our earlier call
	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
	 * so we check it now...
	 */
	if (eError != PVRSRV_OK )
	{
		goto fail_cmdaquire;
	}

	return PVRSRV_OK;

fail_cmdaquire:
fail_cmdinit:
	return eError;
}
/*
 * PollForValueKM - poll a memory location until (value & mask) matches.
 *
 * pui32LinMemAddr  - location to poll.
 * ui32Value        - expected masked value.
 * ui32Mask         - mask applied before comparing.
 * ui32Timeoutus    - polling budget in microseconds (non-emulator build).
 * ui32PollPeriodus - delay between polls in microseconds.
 * bAllowPreemption - IMG_TRUE to sleep between polls instead of busy-waiting
 *                    (non-emulator build only).
 *
 * Returns PVRSRV_OK on a match, PVRSRV_ERROR_TIMEOUT otherwise.
 */
IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
                                                     IMG_UINT32 ui32Value,
                                                     IMG_UINT32 ui32Mask,
                                                     IMG_UINT32 ui32Timeoutus,
                                                     IMG_UINT32 ui32PollPeriodus,
                                                     IMG_BOOL bAllowPreemption)
{
#if defined (EMULATOR)
	{
		PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
#if !defined(__linux__)
		PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
#endif

		/* NOTE(review): ui32Timeoutus is never decremented in this loop, so
		   for any non-zero timeout this polls until the value matches —
		   effectively an unbounded wait. Presumably deliberate for the slow
		   emulator, but worth confirming. */
		do
		{
			if((*pui32LinMemAddr & ui32Mask) == ui32Value)
			{
				return PVRSRV_OK;
			}

#if defined(__linux__)
			OSWaitus(ui32PollPeriodus);
#else
			/* Non-Linux emulator: yield the rest of the timeslice instead. */
			OSReleaseThreadQuanta();
#endif
		} while (ui32Timeoutus);
	}
#else
	{
		IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU;

		if (bAllowPreemption)
		{
			/* OSSleepms granularity is 1ms, so the period must be >= 1000us. */
			PVR_ASSERT(ui32PollPeriodus >= 1000);
		}

		LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
		{
			ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
			if(ui32ActualValue == ui32Value)
			{
				return PVRSRV_OK;
			}

			if (bAllowPreemption)
			{
				OSSleepms(ui32PollPeriodus / 1000);
			}
			else
			{
				OSWaitus(ui32PollPeriodus);
			}
		} END_LOOP_UNTIL_TIMEOUT();

		PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).", ui32Value, ui32ActualValue, ui32Mask));
	}
#endif
	return PVRSRV_ERROR_TIMEOUT;
}
/*
 * PVRSRVSwapToDCBufferKM - queue a flip to a display-class buffer.
 *
 * hDeviceKM        - display class device handle.
 * hBuffer          - buffer (within a swap chain) to flip to.
 * ui32SwapInterval - vsync interval for the flip.
 * hPrivateTag      - opaque tag passed through to the external display driver.
 * ui32ClipRectCount/psClipRect - clip rectangles copied into the command.
 *
 * Builds a DC_FLIP_COMMAND in the swap chain's command queue (fencing on
 * the target buffer's sync, and on the previously flipped buffer's sync if
 * different), submits it, then drives the queue processor until the
 * command is accepted or MAX_HW_TIME_US elapses.
 *
 * Returns PVRSRV_OK on success; PVRSRV_ERROR_INVALID_PARAMS on bad
 * arguments; PVRSRV_ERROR_GENERIC if queue processing stayed blocked.
 */
enum PVRSRV_ERROR PVRSRVSwapToDCBufferKM(void *hDeviceKM, void *hBuffer,
					 u32 ui32SwapInterval, void *hPrivateTag,
					 u32 ui32ClipRectCount,
					 struct IMG_RECT *psClipRect)
{
	enum PVRSRV_ERROR eError;
	struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
	struct PVRSRV_DC_BUFFER *psBuffer;
	struct PVRSRV_QUEUE_INFO *psQueue;
	struct DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
	u32 i;
	u32 ui32NumSrcSyncs = 1;
	struct PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
	struct PVRSRV_COMMAND *psCommand;

	if (!hDeviceKM || !hBuffer || !psClipRect) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVSwapToDCBufferKM: Invalid parameters");
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
	psBuffer = (struct PVRSRV_DC_BUFFER *)hBuffer;
	psQueue = psBuffer->psSwapChain->psQueue;

	/* Always fence on the target buffer's sync... */
	apsSrcSync[0] = psBuffer->sDeviceClassBuffer.psKernelSyncInfo;
	/* ...and on the last flipped buffer too, when it's a different buffer,
	   so this flip orders after the previous one. */
	if (psBuffer->psSwapChain->psLastFlipBuffer &&
	    psBuffer != psBuffer->psSwapChain->psLastFlipBuffer) {
		apsSrcSync[1] =
		    psBuffer->psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.
								psKernelSyncInfo;
		ui32NumSrcSyncs++;
	}

	/* Reserve queue space for the flip command plus its trailing
	   clip-rectangle array. */
	eError = PVRSRVInsertCommandKM(psQueue, &psCommand,
				       psDCInfo->ui32DeviceID, DC_FLIP_COMMAND,
				       0, NULL, ui32NumSrcSyncs, apsSrcSync,
				       sizeof(struct DISPLAYCLASS_FLIP_COMMAND) +
				       (sizeof(struct IMG_RECT) *
							ui32ClipRectCount));
	if (eError != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			"PVRSRVSwapToDCBufferKM: Failed to get space in queue");
		goto Exit;
	}

	/* Fill in the flip command; the clip rects live immediately after the
	   fixed-size command structure in the reserved space. */
	psFlipCmd = (struct DISPLAYCLASS_FLIP_COMMAND *)psCommand->pvData;
	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
	psFlipCmd->hExtSwapChain = psBuffer->psSwapChain->hExtSwapChain;
	psFlipCmd->hExtBuffer = psBuffer->sDeviceClassBuffer.hExtBuffer;
	psFlipCmd->hPrivateTag = hPrivateTag;
	psFlipCmd->ui32ClipRectCount = ui32ClipRectCount;
	psFlipCmd->psClipRect = (struct IMG_RECT *)((u8 *) psFlipCmd +
				sizeof(struct DISPLAYCLASS_FLIP_COMMAND));
	for (i = 0; i < ui32ClipRectCount; i++)
		psFlipCmd->psClipRect[i] = psClipRect[i];
	psFlipCmd->ui32SwapInterval = ui32SwapInterval;

	eError = PVRSRVSubmitCommandKM(psQueue, psCommand);
	if (eError != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			"PVRSRVSwapToDCBufferKM: Failed to submit command");
		goto Exit;
	}

	/* Drive the queue processor until it makes progress or we time out. */
	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) {
		if (PVRSRVProcessQueues(IMG_FALSE) !=
		    PVRSRV_ERROR_PROCESSING_BLOCKED) {
			goto ProcessedQueues;
		}
		OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	PVR_DPF(PVR_DBG_ERROR,
		"PVRSRVSwapToDCBufferKM: Failed to process queues");
	eError = PVRSRV_ERROR_GENERIC;
	goto Exit;

ProcessedQueues:
	/* Remember this buffer so the next flip can fence against it. */
	psBuffer->psSwapChain->psLastFlipBuffer = psBuffer;

Exit:
	return eError;
}
enum PVRSRV_ERROR PVRSRVSwapToDCSystemKM(void *hDeviceKM, void *hSwapChain) { enum PVRSRV_ERROR eError; struct PVRSRV_QUEUE_INFO *psQueue; struct PVRSRV_DISPLAYCLASS_INFO *psDCInfo; struct PVRSRV_DC_SWAPCHAIN *psSwapChain; struct DISPLAYCLASS_FLIP_COMMAND *psFlipCmd; IMG_BOOL bStart = IMG_FALSE; u32 uiStart = 0; u32 ui32NumSrcSyncs = 1; struct PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2]; struct PVRSRV_COMMAND *psCommand; if (!hDeviceKM || !hSwapChain) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVSwapToDCSystemKM: Invalid parameters"); return PVRSRV_ERROR_INVALID_PARAMS; } psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM); psSwapChain = (struct PVRSRV_DC_SWAPCHAIN *)hSwapChain; psQueue = psSwapChain->psQueue; apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo; if (psSwapChain->psLastFlipBuffer) { apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer. psKernelSyncInfo; ui32NumSrcSyncs++; } eError = PVRSRVInsertCommandKM(psQueue, &psCommand, psDCInfo->ui32DeviceID, DC_FLIP_COMMAND, 0, NULL, ui32NumSrcSyncs, apsSrcSync, sizeof(struct DISPLAYCLASS_FLIP_COMMAND)); if (eError != PVRSRV_OK) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVSwapToDCSystemKM: " "Failed to get space in queue"); goto Exit; } psFlipCmd = (struct DISPLAYCLASS_FLIP_COMMAND *)psCommand->pvData; psFlipCmd->hExtDevice = psDCInfo->hExtDevice; psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain; psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer; psFlipCmd->hPrivateTag = NULL; psFlipCmd->ui32ClipRectCount = 0; psFlipCmd->ui32SwapInterval = 1; eError = PVRSRVSubmitCommandKM(psQueue, psCommand); if (eError != PVRSRV_OK) { PVR_DPF(PVR_DBG_ERROR, "PVRSRVSwapToDCSystemKM: Failed to submit command"); goto Exit; } do { if (PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED) goto ProcessedQueues; if (bStart == IMG_FALSE) { uiStart = OSClockus(); bStart = IMG_TRUE; } OSWaitus(MAX_HW_TIME_US / WAIT_TRY_COUNT); } while ((OSClockus() - uiStart) < MAX_HW_TIME_US); 
PVR_DPF(PVR_DBG_ERROR, "PVRSRVSwapToDCSystemKM: Failed to process queues"); eError = PVRSRV_ERROR_GENERIC; goto Exit; ProcessedQueues: psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer; eError = PVRSRV_OK; Exit: return eError; }
/*
 * PVRSRVSwapToDCSystemKM (exported variant)
 *
 * Queue a flip back to the display's system (primary) buffer.
 *
 * Builds a DC_FLIP_COMMAND targeting psDCInfo->sSystemBuffer (no clip
 * rects, swap interval forced to 1), submits it, then pumps the queues
 * until processing is unblocked or MAX_HW_TIME_US elapses.  On LMA
 * (local-memory) builds the whole operation runs under the power lock.
 *
 * hDeviceKM  - display-class device handle (required)
 * hSwapChain - PVRSRV_DC_SWAPCHAIN handle (required)
 *
 * Returns PVRSRV_OK on success, PVRSRV_ERROR_INVALID_PARAMS on NULL
 * arguments, PVRSRV_ERROR_GENERIC on queue-processing timeout, or the
 * error from power-lock/insert/submit.
 */
IMG_EXPORT
PVRSRV_ERROR PVRSRVSwapToDCSystemKM(IMG_HANDLE hDeviceKM,
				    IMG_HANDLE hSwapChain)
{
	PVRSRV_ERROR eError;
	PVRSRV_QUEUE_INFO *psQueue;
	PVRSRV_DISPLAYCLASS_INFO *psDCInfo;
	PVRSRV_DC_SWAPCHAIN *psSwapChain;
	DISPLAYCLASS_FLIP_COMMAND *psFlipCmd;
	IMG_UINT32 ui32NumSrcSyncs = 1;
	PVRSRV_KERNEL_SYNC_INFO *apsSrcSync[2];
	PVRSRV_COMMAND *psCommand;

	if(!hDeviceKM || !hSwapChain)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

#if defined(SUPPORT_LMA)
	/* LMA builds serialise against power transitions. */
	eError = PVRSRVPowerLock(KERNEL_ID, IMG_FALSE);
	if(eError != PVRSRV_OK)
	{
		return eError;
	}
#endif

	psDCInfo = DCDeviceHandleToDCInfo(hDeviceKM);
	psSwapChain = (PVRSRV_DC_SWAPCHAIN*)hSwapChain;
	psQueue = psSwapChain->psQueue;

	/* First source sync: the system buffer we are flipping to. */
	apsSrcSync[0] = psDCInfo->sSystemBuffer.sDeviceClassBuffer.psKernelSyncInfo;
	/*
	 * Also sync against the previously flipped buffer — but only when
	 * its sync object differs from the first one, so the same sync is
	 * never inserted twice into one command.
	 */
	if(psSwapChain->psLastFlipBuffer)
	{
		if (apsSrcSync[0] != psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo)
		{
			apsSrcSync[1] = psSwapChain->psLastFlipBuffer->sDeviceClassBuffer.psKernelSyncInfo;
			ui32NumSrcSyncs++;
		}
	}

	eError = PVRSRVInsertCommandKM (psQueue,
					&psCommand,
					psDCInfo->ui32DeviceID,
					DC_FLIP_COMMAND,
					0,
					IMG_NULL,
					ui32NumSrcSyncs,
					apsSrcSync,
					sizeof(DISPLAYCLASS_FLIP_COMMAND));
	if(eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to get space in queue"));
		goto Exit;
	}

	/* Fill in the command: system buffer, no clip rects, interval 1. */
	psFlipCmd = (DISPLAYCLASS_FLIP_COMMAND*)psCommand->pvData;
	psFlipCmd->hExtDevice = psDCInfo->hExtDevice;
	psFlipCmd->hExtSwapChain = psSwapChain->hExtSwapChain;
	psFlipCmd->hExtBuffer = psDCInfo->sSystemBuffer.sDeviceClassBuffer.hExtBuffer;
	psFlipCmd->hPrivateTag = IMG_NULL;
	psFlipCmd->ui32ClipRectCount = 0;
	psFlipCmd->ui32SwapInterval = 1;

	eError = PVRSRVSubmitCommandKM (psQueue, psCommand);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to submit command"));
		goto Exit;
	}

	/*
	 * Pump the queues until processing is unblocked, giving up after
	 * MAX_HW_TIME_US (sleeping a fraction of it between tries).
	 */
	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		if(PVRSRVProcessQueues(KERNEL_ID, IMG_FALSE) != PVRSRV_ERROR_PROCESSING_BLOCKED)
		{
			goto ProcessedQueues;
		}
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	PVR_DPF((PVR_DBG_ERROR,"PVRSRVSwapToDCSystemKM: Failed to process queues"));
	eError = PVRSRV_ERROR_GENERIC;
	goto Exit;

ProcessedQueues:
	/* Next flip must sync against the system buffer we just queued. */
	psSwapChain->psLastFlipBuffer = &psDCInfo->sSystemBuffer;
	eError = PVRSRV_OK;

Exit:
#if defined(SUPPORT_LMA)
	PVRSRVPowerUnlock(KERNEL_ID);
#endif
	return eError;
}
/*
 * EmuReset
 *
 * Soft-reset the emulator design through its wrapper register bank.
 *
 * Temporarily maps the wrapper registers (at EMULATOR_RGX_REG_WRAPPER_OFFSET
 * from the supplied register base), programs the memory latency, pulses
 * EMU_CR_SOFT_RESET (assert, wait, deassert), configures PCI bus mastering
 * according to the LMA/UMA build, and unmaps again.  Every write is followed
 * by a read-back of the same register to flush it to the hardware.
 *
 * sRegsCpuPBase - CPU physical base address of the emulator register bank.
 *
 * Returns PVRSRV_OK, or PVRSRV_ERROR_BAD_MAPPING if the wrapper registers
 * could not be mapped.
 */
static PVRSRV_ERROR EmuReset(IMG_CPU_PHYADDR sRegsCpuPBase)
{
	IMG_CPU_PHYADDR sWrapperPBase;
	IMG_VOID *pvRegs;
	IMG_UINT32 ui32Latency;
	IMG_UINT32 ui32ResetBits;

	sWrapperPBase.uiAddr =
		sRegsCpuPBase.uiAddr + EMULATOR_RGX_REG_WRAPPER_OFFSET;

	/* Temporary CPU mapping of the wrapper register bank. */
	pvRegs = OSMapPhysToLin(sWrapperPBase, EMULATOR_RGX_REG_WRAPPER_SIZE, 0);
	if (pvRegs == IMG_NULL) {
		PVR_DPF((PVR_DBG_ERROR,"EmuReset: Failed to create wrapper register mapping\n"));
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	/*
	 * Program the memory latency before asserting reset so the value
	 * applies to all aspects of the emulator.
	 */
	ui32Latency = EmuMemLatencyGet();
	if (ui32Latency != 0) {
		PVR_LOG(("EmuReset: Mem latency = 0x%X", ui32Latency));
	}
	OSWriteHWReg32(pvRegs, EMU_CR_MEMORY_LATENCY, ui32Latency);
	(void) OSReadHWReg32(pvRegs, EMU_CR_MEMORY_LATENCY); /* flush write */

	/* Assert soft reset on system, memory and core... */
	ui32ResetBits = EMU_CR_SOFT_RESET_SYS_EN |
			EMU_CR_SOFT_RESET_MEM_EN |
			EMU_CR_SOFT_RESET_CORE_EN;
	OSWriteHWReg32(pvRegs, EMU_CR_SOFT_RESET, ui32ResetBits);
	(void) OSReadHWReg32(pvRegs, EMU_CR_SOFT_RESET); /* flush write */
	OSWaitus(10);

	/* ...then release it again. */
	OSWriteHWReg32(pvRegs, EMU_CR_SOFT_RESET, 0x0);
	(void) OSReadHWReg32(pvRegs, EMU_CR_SOFT_RESET); /* flush write */
	OSWaitus(10);

#if !defined(LMA)
	/* UMA build: enable bus mastering. */
	OSWriteHWReg32(pvRegs, EMU_CR_PCI_MASTER, EMU_CR_PCI_MASTER_MODE_EN);
#else
	/* LMA build: disable it — the emu regbank is not resetable. */
	OSWriteHWReg32(pvRegs, EMU_CR_PCI_MASTER, 0x0);
#endif
	(void) OSReadHWReg32(pvRegs, EMU_CR_PCI_MASTER); /* flush write */

	/* Drop the temporary mapping. */
	OSUnMapPhysToLin(pvRegs, EMULATOR_RGX_REG_WRAPPER_SIZE, 0);

	return PVRSRV_OK;
}
/*
 * PollForValueKM
 *
 * Poll a memory location until (*pui32LinMemAddr & ui32Mask) == ui32Value.
 *
 * On EMULATOR builds the loop never times out (the emulator is far slower
 * than silicon, and on lock-up the system should stop so state can be
 * analysed); between polls it busy-waits on Linux or yields the thread
 * quantum elsewhere.  On non-emulator builds it polls for at most
 * ui32Timeoutus, sleeping (if bAllowPreemption) or busy-waiting
 * ui32PollPeriodus between reads.
 *
 * pui32LinMemAddr  - CPU-linear address to poll (volatile)
 * ui32Value        - value to match after masking
 * ui32Mask         - mask applied to each read
 * ui32Timeoutus    - total timeout in microseconds (ignored on EMULATOR)
 * ui32PollPeriodus - interval between polls in microseconds; must be
 *                    >= 1000 when bAllowPreemption is set (asserted)
 * bAllowPreemption - sleep (OSSleepms) rather than busy-wait between polls
 *
 * Returns PVRSRV_OK when the value is observed, PVRSRV_ERROR_TIMEOUT
 * otherwise (in practice unreachable on EMULATOR builds).
 */
IMG_EXPORT PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32* pui32LinMemAddr,
													  IMG_UINT32 ui32Value,
													  IMG_UINT32 ui32Mask,
													  IMG_UINT32 ui32Timeoutus,
													  IMG_UINT32 ui32PollPeriodus,
													  IMG_BOOL bAllowPreemption)
{
#if defined (EMULATOR)
	{
		PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
#if !defined(__linux__)
		PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
#endif

		/* For the Emulator we want the system to stop when a lock-up is detected so the state can be analysed.
		 * Also the Emulator is much slower than real silicon so timeouts are not valid.
		 */
		do
		{
			if((*pui32LinMemAddr & ui32Mask) == ui32Value)
			{
				return PVRSRV_OK;
			}

#if defined(__linux__)
			OSWaitus(ui32PollPeriodus);
#else
			OSReleaseThreadQuanta();
#endif

		} while (ui32Timeoutus); /* Endless loop only for the Emulator */
	}
#else
	{
		/* Initialiser only required to prevent incorrect warning */
		IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU;

		if (bAllowPreemption)
		{
			/* Sleeping granularity is milliseconds, so the period must
			   be at least 1 ms. */
			PVR_ASSERT(ui32PollPeriodus >= 1000);
		}

		/* PRQA S 3415,4109 1 */ /* macro format critical - leave alone */
		LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
		{
			ui32ActualValue = (*pui32LinMemAddr & ui32Mask);
			if(ui32ActualValue == ui32Value)
			{
				return PVRSRV_OK;
			}

			if (bAllowPreemption)
			{
				OSSleepms(ui32PollPeriodus / 1000);
			}
			else
			{
				OSWaitus(ui32PollPeriodus);
			}
		} END_LOOP_UNTIL_TIMEOUT();

		PVR_DPF((PVR_DBG_ERROR,"PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
				ui32Value, ui32ActualValue, ui32Mask));
	}
#endif /* #if defined (EMULATOR) */

	return PVRSRV_ERROR_TIMEOUT;
}