/*!
*******************************************************************************

 @Function	SGXResetSetupBIFContexts

 @Description	Configure the BIF so the EDM (microkernel) requestor runs in
		the kernel memory context: on multi-context BIFs, point BANK0's
		EDM (and optionally 2D/TA) requestor index at the EDM directory
		list, then program the EDM's directory-list base register with
		the kernel page directory physical address.

 @Input		psDevInfo - SGX Device Info
 @Input		ui32PDUMPFlags - flags to control PDUMP output

 @Return	IMG_VOID

******************************************************************************/
static IMG_VOID SGXResetSetupBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo,
					 IMG_UINT32 ui32PDUMPFlags)
{
	IMG_UINT32 ui32RegVal;

#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif /* PDUMP */

#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	/* Set up EDM for bank 0 to point at kernel context */
	ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);

#if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA)
	/* Set up 2D core for bank 0 to point at kernel context */
	ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
#endif /* SGX_FEATURE_2D_HARDWARE */

#if defined(FIX_HW_BRN_23410)
	/* Set up TA core for bank 0 to point at kernel context to
	   guarantee it is a valid context */
	ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
#endif /* FIX_HW_BRN_23410 */

	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Set up EDM requestor page table in BIF\r\n");
	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
#endif /* defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) */

	{
		IMG_UINT32 ui32EDMDirListReg;

		/* Set up EDM context with kernel page directory */
#if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
		ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
#else
		/* Bases 0 and 1 are not necessarily contiguous */
		ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
#endif /* SGX_BIF_DIR_LIST_INDEX_EDM */

		/* The register takes the PD device-physical address shifted
		   down to its alignment boundary */
		ui32RegVal = psDevInfo->sKernelPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT;

#if defined(FIX_HW_BRN_28011)
		/* HW workaround: also mirror the kernel PD into directory
		   list base 0 */
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
		PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
#endif

		OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, ui32RegVal);
		PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the EDM's directory list base\r\n");
		PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, ui32EDMDirListReg, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
	}
}
/* Configure the BIF for the EDM context: on multi-context BIFs, point the
 * BANK0 EDM (and optionally 2D/TA) requestor at the EDM directory-list
 * index, then program the EDM's directory-list base register with the
 * kernel page directory address.
 */
static IMG_VOID SGXResetSetupBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDUMPFlags)
{
	IMG_UINT32 ui32RegVal;

#if !defined(PDUMP)
	/* Parameter only used by the PDUMP macros below */
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif

#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	/* Bank 0: EDM requestor uses the EDM directory list (kernel context) */
	ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
#if defined(SGX_FEATURE_2D_HARDWARE) && !defined(SGX_FEATURE_PTLA)
	/* 2D core also uses the kernel context */
	ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
#endif
#if defined(FIX_HW_BRN_23410)
	/* HW workaround: give the TA a guaranteed-valid context too */
	ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
#endif
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Set up EDM requestor page table in BIF\r\n");
	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
#endif

	{
		IMG_UINT32 ui32EDMDirListReg;

		/* Select the EDM directory-list base register */
#if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
		ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
#else
		/* Bases 0 and 1 are not necessarily contiguous registers */
		ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
#endif

		/* PD physical address shifted down to its alignment boundary */
		ui32RegVal = psDevInfo->sKernelPDDevPAddr.uiAddr >> SGX_MMU_PDE_ADDR_ALIGNSHIFT;

#if defined(FIX_HW_BRN_28011)
		/* HW workaround: also mirror the kernel PD into base 0 */
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
		PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
#endif

		OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, ui32RegVal);
		PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the EDM's directory list base\r\n");
		PDUMPPDREGWITHFLAGS(&psDevInfo->sMMUAttrib, ui32EDMDirListReg, ui32RegVal, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
	}
}
/* Attempt to claim ui32CmdSize bytes of contiguous space at the current host
 * write offset of the client CCB.  On success *ppvBufferSpace points at the
 * writable region and PVRSRV_OK is returned; if there is not enough free
 * space yet, PVRSRV_ERROR_RETRY is returned so the caller can try again.
 */
static PVRSRV_ERROR _RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize, IMG_PVOID *ppvBufferSpace)
{
	IMG_UINT32 ui32SpaceAvail;

#if defined(PDUMP)
	/* Wait for sufficient CCB space to become available */
	PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", ui32CmdSize, psClientCCB->ui32HostWriteOffset, psClientCCB->szName);
	DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
		       offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
		       psClientCCB->ui32HostWriteOffset,
		       ui32CmdSize,
		       psClientCCB->ui32Size);
#endif

	ui32SpaceAvail = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
				       psClientCCB->psClientCCBCtrl->ui32ReadOffset,
				       psClientCCB->ui32Size);

	/* Strictly-greater comparison: the CCB is never allowed to fill
	   completely, so write and read offsets can't become ambiguous. */
	if (ui32SpaceAvail <= ui32CmdSize)
	{
		return PVRSRV_ERROR_RETRY;
	}

	*ppvBufferSpace = (IMG_PVOID)(psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
	return PVRSRV_OK;
}
/* Dump the TA signature registers for one TA kick into the per-frame
 * signature file "out<frame>_ta.sig".
 *
 * ui32DumpFrameNum - frame number, used to build the output file name
 * ui32TAKickCount  - kick index within the frame; selects the file offset
 *                    so successive kicks append distinct blocks
 * bLastFrame       - IMG_TRUE to tag the dump with PDUMP_FLAGS_LASTFRAME
 * pui32Registers   - array of register offsets to read
 * ui32NumRegisters - number of entries in pui32Registers
 */
void PDumpTASignatureRegisters(u32 ui32DumpFrameNum, u32 ui32TAKickCount,
			       IMG_BOOL bLastFrame, u32 *pui32Registers,
			       u32 ui32NumRegisters)
{
	u32 ui32FileOffset, ui32Flags;
	u32 i;
	__PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING();

	ui32Flags = bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0;
	PDUMPCOMMENTWITHFLAGS(ui32Flags, "\r\n-- Dump TA signature registers\r\n");
	/* u32 is unsigned int, so the conversion must be %u; the previous
	   %lu was a format/argument mismatch (undefined behaviour). */
	snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%u_ta.sig",
		 ui32DumpFrameNum);

	/* Each kick owns a block of ui32NumRegisters 32-bit words */
	ui32FileOffset = ui32TAKickCount * ui32NumRegisters * sizeof(u32);

	for (i = 0; i < ui32NumRegisters; i++) {
		PDumpReadRegKM(pszFile, ui32FileOffset, pui32Registers[i],
			       sizeof(u32), ui32Flags);
		ui32FileOffset += sizeof(u32);
	}
}
/* Sleep long enough for roughly 100 SGX core clocks so that reset register
 * writes can complete; no status register exists to poll for this.
 * Optionally records an IDL plus a flushing register read in the PDump
 * stream, and performs a real read-back on the emulator.
 */
static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDUMPFlags, IMG_BOOL bPDump)
{
#if defined(PDUMP) || defined(EMULATOR)
	IMG_UINT32 ui32ReadRegister;

	/* Pick a harmless register to read back: the soft-reset register
	   (master copy on multi-core parts) */
#if defined(SGX_FEATURE_MP)
	ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET;
#else
	ui32ReadRegister = EUR_CR_SOFT_RESET;
#endif
#endif

#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif

	/* 100 clocks expressed in microseconds:
	   100 * 1000000 [us/s] / core clock [Hz].
	   NOTE(review): assumes ui32CoreClockSpeed is non-zero here - confirm
	   it is initialised before any reset path can run. */
	OSWaitus(100 * 1000000 / psDevInfo->ui32CoreClockSpeed);

	if (bPDump) {
		PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
#if defined(PDUMP)
		PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n");
		PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags);
#endif
	}

#if defined(EMULATOR)
	/* Read a register to make sure we wait long enough on the emulator */
	OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister);
#endif
}
/*!
*******************************************************************************

 @Function	SGXResetInitBIFContexts

 @Description	Initialise the BIF memory contexts: clear BIF control and zero
		every directory-list base register (and the bank registers on
		multi-context BIFs) so no stale translations survive a reset.

 @Input		psDevInfo - SGX Device Info
 @Input		ui32PDUMPFlags - flags to control PDUMP output

 @Return	IMG_VOID

******************************************************************************/
static IMG_VOID SGXResetInitBIFContexts(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDUMPFlags)
{
	IMG_UINT32 ui32RegVal;

#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif /* PDUMP */

	/* Every register below is programmed with zero */
	ui32RegVal = 0;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF bank settings\r\n");
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */

	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Initialise the BIF directory list\r\n");
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
	PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);

#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	{
		IMG_UINT32 ui32DirList, ui32DirListReg;

		/* Bases 1..N-1 are contiguous from EUR_CR_BIF_DIR_LIST_BASE1;
		   base 0 lives at a separate address and was cleared above. */
		for (ui32DirList = 1; ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS; ui32DirList++)
		{
			ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
			OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
			PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
		}
	}
#endif /* SGX_FEATURE_MULTIPLE_MEM_CONTEXTS */
}
/* Submit an SLC flush (no invalidate) for the compute context's data master
 * and wait for the firmware to signal completion.
 *
 * psComputeContext - the server-side compute context whose DM context is
 *                    flushed
 *
 * Returns PVRSRV_OK on success, otherwise the error from scheduling the
 * command or waiting for the firmware operation.
 */
IMG_EXPORT PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
{
	/* Zero-initialise: the whole struct (sizeof(sFlushCmd)) is copied to
	   the FW kernel CCB, so unset union members/padding must not carry
	   stack garbage. */
	RGXFWIF_KCCB_CMD sFlushCmd = {0};
	PVRSRV_ERROR eError = PVRSRV_OK;

#if defined(PDUMP)
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
#endif
	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
	sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;	/* flush only, keep cache lines valid */
	sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;	/* target a specific DM context */
	sFlushCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
	sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);

	/* The kernel CCB may be temporarily full; retry until the command is
	   accepted or the timeout expires */
	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
	{
		eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
					    RGXFWIF_DM_GP,
					    &sFlushCmd,
					    sizeof(sFlushCmd),
					    IMG_TRUE);
		if (eError != PVRSRV_ERROR_RETRY)
		{
			break;
		}
		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
	} END_LOOP_UNTIL_TIMEOUT();

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
	}
	else
	{
		/* Wait for the SLC flush to complete */
		eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
					RGXFWIF_DM_GP,
					psComputeContext->psSync,
					IMG_TRUE);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Compute flush aborted with error (%u)", eError));
		}
	}
	return eError;
}
/* Dump the performance counter registers for one frame into the per-frame
 * file "out<frame>.perf".
 *
 * ui32DumpFrameNum - frame number, used to build the output file name
 * bLastFrame       - IMG_TRUE to tag the dump with PDUMP_FLAGS_LASTFRAME
 * pui32Registers   - array of counter register offsets to read
 * ui32NumRegisters - number of entries in pui32Registers
 */
void PDumpCounterRegisters(u32 ui32DumpFrameNum, IMG_BOOL bLastFrame, u32 *pui32Registers, u32 ui32NumRegisters)
{
	u32 ui32FileOffset;
	u32 i;
	__PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING();

	PDUMPCOMMENTWITHFLAGS(bLastFrame ? PDUMP_FLAGS_LASTFRAME : 0,
			      "\r\n-- Dump counter registers\r\n");
	/* u32 is unsigned int, so the conversion must be %u; the previous
	   %lu was a format/argument mismatch (undefined behaviour). */
	snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "out%u.perf",
		 ui32DumpFrameNum);

	/* PDumpCountRead advances the offset through &ui32FileOffset */
	ui32FileOffset = 0;
	for (i = 0; i < ui32NumRegisters; i++) {
		PDumpCountRead(pszFile, pui32Registers[i], sizeof(u32),
			       &ui32FileOffset, bLastFrame);
	}
}
/* Emit a PDump "SII" script command that captures a rendered bitmap
 * (ui32Size bytes at device-virtual sDevBaseAddr) to "<pszFileName>.bin",
 * recording its geometry, pixel format and memory layout.
 *
 * Returns PVRSRV_OK (the script write itself reports no failure here).
 */
enum PVRSRV_ERROR PDumpBitmapKM(char *pszFileName, u32 ui32FileOffset,
				u32 ui32Width, u32 ui32Height,
				u32 ui32StrideInBytes,
				struct IMG_DEV_VIRTADDR sDevBaseAddr,
				u32 ui32Size, enum PDUMP_PIXEL_FORMAT ePixelFormat,
				enum PDUMP_MEM_FORMAT eMemFormat,
				u32 ui32PDumpFlags)
{
	__PDBG_PDUMP_STATE_GET_SCRIPT_STRING(PVRSRV_ERROR_GENERIC);
	PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "\r\n-- Dump bitmap of render\r\n");

	/* All numeric arguments are 32-bit (u32 / int-sized enums), so every
	   conversion is %08X; the previous %08lX specifiers were
	   format/argument mismatches (undefined behaviour). */
	snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
		 "SII %s %s.bin :SGXMEM:v:0x%08X 0x%08X 0x%08X 0x%08X "
		 "0x%08X 0x%08X 0x%08X 0x%08X\r\n",
		 pszFileName, pszFileName, sDevBaseAddr.uiAddr, ui32Size,
		 ui32FileOffset, ePixelFormat, ui32Width, ui32Height,
		 ui32StrideInBytes, eMemFormat);
	PDumpWriteString2(pszScript, ui32PDumpFlags);
	return PVRSRV_OK;
}
/*!
*******************************************************************************

 @Function	SGXResetSleep

 @Description	Sleep for a short time (100 SGX clocks) to allow reset
		register writes to complete.  Required because no status
		registers are available to poll on.  Optionally records an
		IDL and a flushing register read in the PDump stream, and
		performs a real read-back on the emulator.

 @Input		psDevInfo - SGX Device Info
 @Input		ui32PDUMPFlags - flags to control PDUMP output
 @Input		bPDump - Pdump the sleep

 @Return	Nothing

******************************************************************************/
static IMG_VOID SGXResetSleep(PVRSRV_SGXDEV_INFO	*psDevInfo,
			      IMG_UINT32		ui32PDUMPFlags,
			      IMG_BOOL			bPDump)
{
#if defined(PDUMP) || defined(EMULATOR)
	IMG_UINT32	ui32ReadRegister;

	/* Use the soft-reset register (master copy on multi-core parts) as a
	   harmless read-back target */
#if defined(SGX_FEATURE_MP)
	ui32ReadRegister = EUR_CR_MASTER_SOFT_RESET;
#else
	ui32ReadRegister = EUR_CR_SOFT_RESET;
#endif /* SGX_FEATURE_MP */
#endif

#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif /* PDUMP */

	/* Sleep for 100 SGX clocks */
	SGXWaitClocks(psDevInfo, 100);

	if (bPDump)
	{
		PDUMPIDLWITHFLAGS(30, ui32PDUMPFlags);
#if defined(PDUMP)
		PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Read back to flush the register writes\r\n");
		PDumpRegRead(SGX_PDUMPREG_NAME, ui32ReadRegister, ui32PDUMPFlags);
#endif
	}

#if defined(EMULATOR)
	/* Read a register to make sure we wait long enough on the emulator... */
	OSReadHWReg(psDevInfo->pvRegsBaseKM, ui32ReadRegister);
#endif
}
/* Workout how much space this command will require.
 *
 * First phase of the CCB command-helper flow: records the caller's fence/
 * update/server-sync/command data in psCmdHelperData and pre-computes the
 * aligned size of every sub-command (fence block, optional pre/post
 * timestamp writes, optional RMW UFO, DM command, update block) so that a
 * later acquire step can reserve the right amount of CCB space in one go.
 *
 * Returns PVRSRV_OK (size computation cannot fail).
 */
PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB,
				    IMG_UINT32 ui32ClientFenceCount,
				    PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
				    IMG_UINT32 *paui32FenceValue,
				    IMG_UINT32 ui32ClientUpdateCount,
				    PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
				    IMG_UINT32 *paui32UpdateValue,
				    IMG_UINT32 ui32ServerSyncCount,
				    IMG_UINT32 *paui32ServerSyncFlags,
				    SERVER_SYNC_PRIMITIVE **papsServerSyncs,
				    IMG_UINT32 ui32CmdSize,
				    IMG_PBYTE pui8DMCmd,
				    PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
				    PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
				    PRGXFWIF_UFO_ADDR * ppRMWUFOAddr,
				    RGXFWIF_CCB_CMD_TYPE eType,
				    IMG_BOOL bPDumpContinuous,
				    IMG_CHAR *pszCommandName,
				    RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
{
	IMG_UINT32 ui32FenceCount;
	IMG_UINT32 ui32UpdateCount;
	IMG_UINT32 i;

	/* Save the data we require in the submit call */
	psCmdHelperData->psClientCCB = psClientCCB;
	psCmdHelperData->bPDumpContinuous = bPDumpContinuous;
	psCmdHelperData->pszCommandName = pszCommandName;

	/* Client sync data */
	psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
	psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
	psCmdHelperData->paui32FenceValue = paui32FenceValue;
	psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
	psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
	psCmdHelperData->paui32UpdateValue = paui32UpdateValue;

	/* Server sync data */
	psCmdHelperData->ui32ServerSyncCount = ui32ServerSyncCount;
	psCmdHelperData->paui32ServerSyncFlags = paui32ServerSyncFlags;
	psCmdHelperData->papsServerSyncs = papsServerSyncs;

	/* Command data */
	psCmdHelperData->ui32CmdSize = ui32CmdSize;
	psCmdHelperData->pui8DMCmd = pui8DMCmd;
	psCmdHelperData->eType = eType;

	PDUMPCOMMENTWITHFLAGS((bPDumpContinuous) ? PDUMP_FLAGS_CONTINUOUS : 0,
			      "%s Command Server Init on FWCtx %08x", pszCommandName,
			      FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);

	/* Init the generated data members */
	psCmdHelperData->ui32ServerFenceCount = 0;
	psCmdHelperData->ui32ServerUpdateCount = 0;
	psCmdHelperData->ui32PreTimeStampCmdSize = 0;
	psCmdHelperData->ui32PostTimeStampCmdSize = 0;
	psCmdHelperData->ui32RMWUFOCmdSize = 0;

	/* Only reserve space for a pre-op timestamp write if the caller
	   supplied a non-null FW address; payload is rounded up to the FW
	   allocation alignment */
	if (ppPreAddr && (ppPreAddr->ui32Addr != 0))
	{
		psCmdHelperData->pPreTimestampAddr = * ppPreAddr;
		psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
			+ ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
	}

	/* Likewise for the post-op timestamp write */
	if (ppPostAddr && (ppPostAddr->ui32Addr != 0))
	{
		psCmdHelperData->pPostTimestampAddr = * ppPostAddr;
		psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
			+ ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
	}

	/* And for the optional read-modify-write UFO */
	if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
	{
		psCmdHelperData->pRMWUFOAddr = * ppRMWUFOAddr;
		psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO);
	}

	/* Workout how many fence and update's this command will have */
	for (i = 0; i < ui32ServerSyncCount; i++)
	{
		if (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
		{
			/* Server syncs must fence */
			psCmdHelperData->ui32ServerFenceCount++;
		}
		if (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
		{
			psCmdHelperData->ui32ServerUpdateCount++;
		}
	}

	/* Total fence command size (header plus command data) */
	ui32FenceCount = ui32ClientFenceCount + psCmdHelperData->ui32ServerFenceCount;
	if (ui32FenceCount)
	{
		psCmdHelperData->ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32FenceCount * sizeof(RGXFWIF_UFO)) + sizeof(RGXFWIF_CCB_CMD_HEADER));
	}
	else
	{
		psCmdHelperData->ui32FenceCmdSize = 0;
	}

	/* Total DM command size (header plus command data) */
	psCmdHelperData->ui32DMCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER));

	/* Total update command size (header plus command data) */
	ui32UpdateCount = ui32ClientUpdateCount + psCmdHelperData->ui32ServerUpdateCount;
	if (ui32UpdateCount)
	{
		psCmdHelperData->ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32UpdateCount * sizeof(RGXFWIF_UFO)) + sizeof(RGXFWIF_CCB_CMD_HEADER));
	}
	else
	{
		psCmdHelperData->ui32UpdateCmdSize = 0;
	}
	return PVRSRV_OK;
}
/******************************************************************************
 FUNCTION	: RGXAcquireCCB

 PURPOSE	: Obtains access to write some commands to a CCB.  If the
		  requested size does not fit contiguously before the end of
		  the buffer, a padding command is written to consume the
		  remainder so the real command can start at offset 0.

 PARAMETERS	: psClientCCB		- The client CCB
		  ui32CmdSize		- How much space is required
		  ppvBufferSpace	- Pointer to space in the buffer
		  bPDumpContinuous	- Should this be PDump continuous?

 RETURNS	: PVRSRV_ERROR
******************************************************************************/
IMG_INTERNAL PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
					IMG_UINT32	ui32CmdSize,
					IMG_PVOID	*ppvBufferSpace,
					IMG_BOOL	bPDumpContinuous)
{
	PVRSRV_ERROR eError;
	IMG_UINT32	ui32PDumpFlags	= bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS : 0;
	IMG_BOOL	bInCaptureRange;
	IMG_BOOL	bPdumpEnabled;

	PDumpIsCaptureFrameKM(&bInCaptureRange);
	bPdumpEnabled = (bInCaptureRange || bPDumpContinuous);

	/*
		PDumpSetFrame will detect as we Transition into capture range for
		frame based data but if we are PDumping continuous data then we
		need to inform the PDump layer ourselves
	*/
	if (bPDumpContinuous && !bInCaptureRange)
	{
		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_TRUE, IMG_TRUE);
		if (eError != PVRSRV_OK)
		{
			return eError;
		}
	}

	/* Check that the CCB can hold this command + padding */
	if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
	{
		PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)\n",
			 ui32CmdSize, psClientCCB->ui32Size));
		return PVRSRV_ERROR_CMD_TOO_BIG;
	}

	/*
		Check we don't overflow the end of the buffer and make sure we have
		enough space for the padding command.
	*/
	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
	{
		RGXFWIF_CCB_CMD_HEADER *psHeader;
		IMG_VOID *pvHeader;
		PVRSRV_ERROR eError;	/* NOTE(review): shadows the outer eError - harmless here but worth renaming */
		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;

		/* We're at the end of the buffer without enough contiguous space */
		eError = _RGXAcquireCCB(psClientCCB, ui32Remain, &pvHeader);
		if (eError != PVRSRV_OK)
		{
			/*
				It's possible no commands have been processed in which case
				as we can fail the padding allocation due to that fact we
				never allow the client CCB to be full
			*/
			return eError;
		}

		/* Fill the tail of the buffer with a single padding command the
		   FW will skip over */
		psHeader = pvHeader;
		psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
		psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);

		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
		if (bPdumpEnabled)
		{
			DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
					   psClientCCB->ui32HostWriteOffset,
					   ui32Remain,
					   ui32PDumpFlags);
		}

		/* Advance (with wrap) past the padding so the real command
		   starts at the beginning of the buffer */
		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
				  ui32Remain,
				  psClientCCB->ui32Size);
	}

	return _RGXAcquireCCB(psClientCCB, ui32CmdSize, ppvBufferSpace);
}
/* Fill in the server syncs data and release the CCB space.
 *
 * For each helper-data entry this writes the server-sync fence and update
 * UFOs into the space previously reserved in the client CCB, emits the
 * Linux fence trace points, then releases the whole allocation back to the
 * CCB.  The FW still won't run any of it until the subsequent kick.
 */
IMG_VOID RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
				   RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
				   const IMG_CHAR *pcszDMName,
				   IMG_UINT32 ui32CtxAddr)
{
	IMG_UINT32 ui32AllocSize = 0;
	IMG_UINT32 i;

	/* Workout how much space we need for all the command(s) */
	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);

	/*
		For each command fill in the server sync info
	*/
	for (i=0;i<ui32CmdCount;i++)
	{
		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
		IMG_UINT8 *pui8ServerFenceStart = psCmdHelperData->pui8ServerFenceStart;
		IMG_UINT8 *pui8ServerUpdateStart = psCmdHelperData->pui8ServerUpdateStart;
		IMG_UINT32 j;

		/* Now fill in the server fence and updates together */
		for (j = 0; j < psCmdHelperData->ui32ServerSyncCount; j++)
		{
			RGXFWIF_UFO *psUFOPtr;
			IMG_UINT32 ui32UpdateValue;
			IMG_UINT32 ui32FenceValue;
			PVRSRV_ERROR eError;
			IMG_BOOL bFence = ((psCmdHelperData->paui32ServerSyncFlags[j] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)!=0)?IMG_TRUE:IMG_FALSE;
			IMG_BOOL bUpdate = ((psCmdHelperData->paui32ServerSyncFlags[j] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)!=0)?IMG_TRUE:IMG_FALSE;

			/* Queue the HW op on the server sync; this yields the
			   fence value to check and (if requested) the update
			   value to write */
			eError = PVRSRVServerSyncQueueHWOpKM(psCmdHelperData->papsServerSyncs[j],
							     bUpdate,
							     &ui32FenceValue,
							     &ui32UpdateValue);
			/* This function can't fail */
			PVR_ASSERT(eError == PVRSRV_OK);

			/*
				As server syncs always fence (we have a check in
				RGXCmdHelperInitCmdCCB which ensures the client is
				playing ball) the filling in of the fence is
				unconditional.
			*/
			if (bFence)
			{
				PVR_ASSERT(pui8ServerFenceStart != 0);

				psUFOPtr = (RGXFWIF_UFO *) pui8ServerFenceStart;
				psUFOPtr->puiAddrUFO.ui32Addr = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j]);
				psUFOPtr->ui32Value = ui32FenceValue;
				pui8ServerFenceStart += sizeof(RGXFWIF_UFO);

#if defined(LINUX)
				trace_rogue_fence_checks(pcszDMName,
							 ui32CtxAddr,
							 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
							 1,
							 &psUFOPtr->puiAddrUFO,
							 &psUFOPtr->ui32Value);
#endif
			}

			/* If there is an update then fill that in as well */
			if (bUpdate)
			{
				PVR_ASSERT(pui8ServerUpdateStart != 0);

				psUFOPtr = (RGXFWIF_UFO *) pui8ServerUpdateStart;
				psUFOPtr->puiAddrUFO.ui32Addr = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j]);
				psUFOPtr->ui32Value = ui32UpdateValue;
				pui8ServerUpdateStart += sizeof(RGXFWIF_UFO);

#if defined(LINUX)
				trace_rogue_fence_updates(pcszDMName,
							  ui32CtxAddr,
							  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
							  1,
							  &psUFOPtr->puiAddrUFO,
							  &psUFOPtr->ui32Value);
#endif

#if defined(NO_HARDWARE)
				/*
					There is no FW so the host has to do any sync
					updates (client sync updates are done in the
					client)
				*/
				PVRSRVServerSyncPrimSetKM(psCmdHelperData->papsServerSyncs[j], ui32UpdateValue);
#endif
			}
		}

#if defined(LINUX)
		/* Trace the client-supplied fences and updates as well */
		trace_rogue_fence_checks(pcszDMName,
					 ui32CtxAddr,
					 psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
					 psCmdHelperData->ui32ClientFenceCount,
					 psCmdHelperData->pauiFenceUFOAddress,
					 psCmdHelperData->paui32FenceValue);
		trace_rogue_fence_updates(pcszDMName,
					  ui32CtxAddr,
					  psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
					  psCmdHelperData->ui32ClientUpdateCount,
					  psCmdHelperData->pauiUpdateUFOAddress,
					  psCmdHelperData->paui32UpdateValue);
#endif

		if (psCmdHelperData->ui32ServerSyncCount)
		{
			/*
				Do some sanity checks to ensure we did the pointer
				math right
			*/
			if (pui8ServerFenceStart != 0)
			{
				PVR_ASSERT(pui8ServerFenceStart ==
					   (psCmdHelperData->pui8StartPtr +
					    psCmdHelperData->ui32FenceCmdSize));
			}

			if (pui8ServerUpdateStart != 0)
			{
				PVR_ASSERT(pui8ServerUpdateStart ==
					   psCmdHelperData->pui8StartPtr             +
					   psCmdHelperData->ui32FenceCmdSize         +
					   psCmdHelperData->ui32PreTimeStampCmdSize  +
					   psCmdHelperData->ui32DMCmdSize            +
					   psCmdHelperData->ui32RMWUFOCmdSize        +
					   psCmdHelperData->ui32PostTimeStampCmdSize +
					   psCmdHelperData->ui32UpdateCmdSize);
			}
		}

		/*
			All the commands have been filled in so release the CCB space.
			The FW still won't run this command until we kick it
		*/
		PDUMPCOMMENTWITHFLAGS((psCmdHelperData->bPDumpContinuous) ? PDUMP_FLAGS_CONTINUOUS : 0,
				      "%s Command Server Release on FWCtx %08x",
				      psCmdHelperData->pszCommandName, ui32CtxAddr);
	}

	_RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB,
				  asCmdHelperData[0].psClientCCB->ui32HostWriteOffset,
				  ui32AllocSize);

	RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
		      ui32AllocSize,
		      asCmdHelperData[0].bPDumpContinuous);
}
/*! ******************************************************************************* @Function SGXReset @Description Reset chip @Input psDevInfo - device info. structure @Input bHardwareRecovery - true if recovering powered hardware, false if powering up @Input ui32PDUMPFlags - flags to control PDUMP output @Return IMG_VOID ******************************************************************************/ IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_BOOL bHardwareRecovery, IMG_UINT32 ui32PDUMPFlags) #if !defined(SGX_FEATURE_MP) { IMG_UINT32 ui32RegVal; #if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK) const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK; #else const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK; #endif #if !defined(PDUMP) PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); #endif /* PDUMP */ PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n"); #if defined(FIX_HW_BRN_23944) /* Pause the BIF. */ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); if (ui32RegVal & ui32BifFaultMask) { /* Page fault needs to be cleared before resetting the BIF. 
*/ ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); } #endif /* defined(FIX_HW_BRN_23944) */ /* Reset all including BIF */ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); /* Initialise the BIF state. */ #if defined(SGX_FEATURE_36BIT_MMU) /* enable 36bit addressing mode if the MMU supports it*/ OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags); #endif SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags); #if defined(EUR_CR_BIF_MEM_ARB_CONFIG) /* Initialise the memory arbiter to its default state */ ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) | (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) | (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags); #endif /* EUR_CR_BIF_MEM_ARB_CONFIG */ #if defined(SGX_FEATURE_SYSTEM_CACHE) #if defined(SGX_BYPASS_SYSTEM_CACHE) /* set the SLC to bypass all accesses */ ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK; #else #if defined(FIX_HW_BRN_26620) ui32RegVal = 0; #else /* set the SLC to bypass cache-coherent accesses */ ui32RegVal = MNE_CR_CTRL_BYP_CC_MASK; #endif #if defined(FIX_HW_BRN_34028) /* Bypass the MNE for the USEC requester */ 
ui32RegVal |= (8 << MNE_CR_CTRL_BYPASS_SHIFT); #endif #endif /* SGX_BYPASS_SYSTEM_CACHE */ OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal); PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal); #endif /* SGX_FEATURE_SYSTEM_CACHE */ if (bHardwareRecovery) { /* Set all requestors to the dummy PD which forces all memory accesses to page fault. This enables us to flush out BIF requests from parts of SGX which do not have their own soft reset. Note: sBIFResetPDDevPAddr.uiAddr is a relative address (2GB max) MSB is the bus master flag; 1 == enabled */ ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); /* Bring BIF out of reset. */ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); /* Check for a page fault from parts of SGX which do not have a reset. */ for (;;) { IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); IMG_DEV_VIRTADDR sBifFault; IMG_UINT32 ui32PDIndex, ui32PTIndex; if ((ui32BifIntStat & ui32BifFaultMask) == 0) { break; } /* There is a page fault, so reset the BIF again, map in the dummy page, bring the BIF up and invalidate the Directory Cache. */ sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr)); ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; /* Put the BIF into reset. */ SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE); /* Map in the dummy page. 
*/ psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) | SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID; psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr >>SGX_MMU_PTE_ADDR_ALIGNSHIFT) | SGX_MMU_PTE_VALID; /* Clear outstanding events. */ ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal); ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); /* Bring the BIF out of reset. */ SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); /* Invalidate Directory Cache. */ SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); /* Unmap the dummy page and try again. */ psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0; psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0; } } else {
IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_BOOL bHardwareRecovery, IMG_UINT32 ui32PDUMPFlags) { IMG_UINT32 ui32RegVal; #if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK) const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK; #else const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK; #endif #ifndef PDUMP PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); #endif psDevInfo->ui32NumResets++; PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n"); #if defined(FIX_HW_BRN_23944) ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); if (ui32RegVal & ui32BifFaultMask) { ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); } #endif SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); #if defined(SGX_FEATURE_36BIT_MMU) OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags); #endif ui32RegVal = 0; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); #if defined(SGX_FEATURE_MP) 
OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); #endif #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags); #endif OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags); #if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS) { IMG_UINT32 ui32DirList, ui32DirListReg; for (ui32DirList = 1; ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS; ui32DirList++) { ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1); OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, ui32DirListReg, ui32RegVal, ui32PDUMPFlags); } } #endif #if defined(EUR_CR_BIF_MEM_ARB_CONFIG) ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) | (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) | (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags); #endif #if defined(SGX_FEATURE_SYSTEM_CACHE) #if defined(SGX_FEATURE_MP) #if defined(SGX_BYPASS_SYSTEM_CACHE) #error SGX_BYPASS_SYSTEM_CACHE not supported #else ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK | #if defined(FIX_HW_BRN_30954) EUR_CR_MASTER_SLC_CTRL_DISABLE_REORDERING_MASK | #endif (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal); PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL, ui32RegVal); 
ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK; #if defined(FIX_HW_BRN_31195) ui32RegVal |= EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE0_MASK | EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE1_MASK | EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE2_MASK | EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_USE3_MASK | EUR_CR_MASTER_SLC_CTRL_BYPASS_REQ_TA_MASK; #endif OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal); PDUMPREG(SGX_PDUMPREG_NAME, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal); #endif #else #if defined(SGX_BYPASS_SYSTEM_CACHE) ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK; #else #if defined(FIX_HW_BRN_26620) ui32RegVal = 0; #else ui32RegVal = MNE_CR_CTRL_BYP_CC_MASK; #endif #endif OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal); PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal); #endif #endif if (bHardwareRecovery) { ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); for (;;) { IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); IMG_DEV_VIRTADDR sBifFault; IMG_UINT32 ui32PDIndex, ui32PTIndex; if ((ui32BifIntStat & ui32BifFaultMask) == 0) { break; } sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr)); ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE); psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) | SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID; 
psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr >>SGX_MMU_PTE_ADDR_ALIGNSHIFT) | SGX_MMU_PTE_VALID; ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal); ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0; psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0; } } else {
/*!
*******************************************************************************
 @Function	RGXPreKickCacheCommand

 @Description	If any MMU cache maintenance operations have been accumulated
		in the file-scope ui32CacheOpps flags, acquire the power lock,
		force the device ON and submit one MMUCACHE command to the
		kernel CCB of every DM, consuming (zeroing) the accumulated
		flags. A no-op when no operations are pending.

 @Input		psDevInfo - RGX device info

 @Return	PVRSRV_OK on success, otherwise a PVRSRV_ERROR code
******************************************************************************/
PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
	RGXFWIF_KCCB_CMD sFlushCmd;
	PVRSRV_ERROR eError = PVRSRV_OK;
	/* Starts one past the last DM; the do/while below pre-decrements. */
	RGXFWIF_DM eDMcount = RGXFWIF_DM_MAX;

	/* No cache operations pending - nothing to schedule. */
	if (!ui32CacheOpps)
	{
		goto _PVRSRVPowerLock_Exit;
	}

	sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
	/* Set which memory context this command is for (all ctxs for now) */
	ui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL;
#if 0
	sFlushCmd.uCmdData.sMMUCacheData.psMemoryContext = ???
#endif

	/* PVRSRVPowerLock guarantees atomicity between commands and global variables consistency.
	 * This is helpful in a scenario with several applications allocating resources. */
	eError = PVRSRVPowerLock();
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to acquire powerlock (%s)",
					PVRSRVGetErrorStringKM(eError)));
		goto _PVRSRVPowerLock_Exit;
	}

	/* The FW can only process the command while powered, so force the ON state. */
	PDUMPPOWCMDSTART();
	eError = PVRSRVSetDevicePowerStateKM(psDeviceNode->sDevId.ui32DeviceIndex,
										 PVRSRV_DEV_POWER_STATE_ON,
										 IMG_FALSE);
	PDUMPPOWCMDEND();

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_WARNING, "RGXPreKickCacheCommand: failed to transition RGX to ON (%s)",
					PVRSRVGetErrorStringKM(eError)));
		goto _PVRSRVSetDevicePowerStateKM_Exit;
	}

	sFlushCmd.uCmdData.sMMUCacheData.ui32Flags = ui32CacheOpps;
	/* Bump the global sequence number so the FW can order/ack this operation. */
	sFlushCmd.uCmdData.sMMUCacheData.ui32CacheSequenceNum = ++ui32CacheOpSequence;

#if defined(PDUMP)
	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
						  "Submit MMU flush and invalidate (flags = 0x%08x, cache operation sequence = %u)",
						  ui32CacheOpps, ui32CacheOpSequence);
#endif

	/* Flags are consumed here. NOTE(review): if RGXSendCommandRaw fails below,
	 * the pending operations are dropped rather than retried - confirm intended. */
	ui32CacheOpps = 0;

	/* Schedule MMU cache command */
	do
	{
		/* Iterate every DM from RGXFWIF_DM_MAX-1 down to 0. */
		eDMcount--;
		eError = RGXSendCommandRaw(psDevInfo, eDMcount, &sFlushCmd, sizeof(RGXFWIF_KCCB_CMD), PDUMP_FLAGS_CONTINUOUS);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXPreKickCacheCommand: Failed to schedule MMU cache command \
to DM=%d with error (%u)", eDMcount, eError));
			break;
		}
	}
	while(eDMcount > 0);

_PVRSRVSetDevicePowerStateKM_Exit:
	PVRSRVPowerUnlock();

_PVRSRVPowerLock_Exit:
	return eError;
}
/*!
*******************************************************************************
 @Function	PVRSRVAllocSyncPrimitiveBlockKM

 @Description	Allocate a block of sync primitives (a UFO block): allocates
		the host tracking structure, the device UFO block, maps the
		block into the CPU, exports it, creates its lock and finally
		links the block onto the connection's block list.

 @Input		psConnection - connection the new block belongs to
 @Input		psDevNode - device node to allocate the UFO block on
 @Output	ppsSyncBlk - the newly created sync primitive block
 @Output	puiSyncPrimVAddr - firmware virtual address of the block
 @Output	puiSyncPrimBlockSize - size in bytes of the allocated block
 @Output	psExportCookie - cookie callers use to export the block

 @Return	PVRSRV_OK on success; on failure every partial allocation is
		unwound (see the e0..e4 goto chain) and an error is returned
******************************************************************************/
PVRSRV_ERROR
PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
								PVRSRV_DEVICE_NODE *psDevNode,
								SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
								IMG_UINT32 *puiSyncPrimVAddr,
								IMG_UINT32 *puiSyncPrimBlockSize,
								DEVMEM_EXPORTCOOKIE **psExportCookie)
{
	SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
	PVRSRV_ERROR eError;

	psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
	if (psNewSyncBlk == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto e0;
	}
	psNewSyncBlk->psDevNode = psDevNode;

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");

	/* Device-side allocation; also yields the FW virtual address. */
	eError = psDevNode->pfnAllocUFOBlock(psDevNode,
										 &psNewSyncBlk->psMemDesc,
										 puiSyncPrimVAddr,
										 &psNewSyncBlk->ui32BlockSize);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	/* Map the block so the host can read/write the sync values directly. */
	eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
									  (IMG_PVOID *) &psNewSyncBlk->pui32LinAddr);
	if (eError != PVRSRV_OK)
	{
		goto e2;
	}

	eError = DevmemExport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
	if (eError != PVRSRV_OK)
	{
		goto e3;
	}

	eError = OSLockCreate(&psNewSyncBlk->hLock, LOCK_TYPE_NONE);
	if (eError != PVRSRV_OK)
	{
		goto e4;
	}

	psNewSyncBlk->ui32RefCount = 1;

	/* If there is a connection pointer then add the new block onto its list */
	_SyncConnectionAddBlock(psConnection, psNewSyncBlk);

	*psExportCookie = &psNewSyncBlk->sExportCookie;
	*ppsSyncBlk = psNewSyncBlk;
	*puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;

	PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
						  "Allocated UFO block (FirmwareVAddr = 0x%08x)",
						  *puiSyncPrimVAddr);

	return PVRSRV_OK;

	/* Error exit stack: unwind in strict reverse order of acquisition. */
e4:
	DevmemUnexport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
e3:
	DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
e2:
	psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
e1:
	OSFreeMem(psNewSyncBlk);
e0:
	return eError;
}
/*!
*******************************************************************************
 @Function	RGXSLCCacheInvalidateRequest

 @Description	Schedule (and wait for) an SLC flush+invalidate on behalf of
		a PMR. The flush is issued when the PMR is GPU-cached, or when
		its caching attributes cannot be determined (by precaution).
		Skipped entirely while the device is in the DEINIT state.

 @Input		psDeviceNode - device the flush is scheduled on
 @Input		psPmr - the PMR whose caching attributes are inspected

 @Return	PVRSRV_OK on success, otherwise the first error encountered
******************************************************************************/
PVRSRV_ERROR RGXSLCCacheInvalidateRequest(PVRSRV_DEVICE_NODE *psDeviceNode,
					PMR *psPmr)
{
	RGXFWIF_KCCB_CMD sFlushInvalCmd;
	/* Fix: initialise so a PMR_Flags() failure cannot leave us passing an
	 * uninitialised value to DevmemDeviceCacheMode() below (undefined
	 * behaviour, CERT C EXP33-C). On failure the flush is issued anyway
	 * via the (eError != PVRSRV_OK) precaution branch. */
	IMG_UINT32 ulPMRFlags = 0;
	IMG_UINT32 ui32DeviceCacheFlags;
	PVRSRV_ERROR eError = PVRSRV_OK;

	PVR_ASSERT(psDeviceNode);

	/* In DEINIT state, we stop scheduling SLC flush commands, because we don't know in what state the firmware is.
	 * Anyway, if we are in DEINIT state, we don't care anymore about FW memory consistency */
	if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
	{
		/* get the PMR's caching flags */
		eError = PMR_Flags(psPmr, &ulPMRFlags);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_WARNING, "RGXSLCCacheInvalidateRequest: Unable to get the caching attributes of PMR %p",psPmr));
		}

		ui32DeviceCacheFlags = DevmemDeviceCacheMode(ulPMRFlags);

		/* Schedule a SLC flush and invalidate if
		 * - the memory is cached.
		 * - we can't get the caching attributes (by precaution).
		 */
		if ((ui32DeviceCacheFlags == PVRSRV_MEMALLOCFLAG_GPU_CACHED) || (eError != PVRSRV_OK))
		{
			/* Schedule the SLC flush command ... */
#if defined(PDUMP)
			PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
#endif
			sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
			/* Whole-SLC operation: no specific DM context is targeted. */
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
			sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;

			eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
												RGXFWIF_DM_GP,
												&sFlushInvalCmd,
												sizeof(sFlushInvalCmd),
												IMG_TRUE);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,"RGXSLCCacheInvalidateRequest: Failed to schedule SLC flush command with error (%u)", eError));
			}
			else
			{
				/* Wait for the SLC flush to complete */
				eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, IMG_TRUE);
				if (eError != PVRSRV_OK)
				{
					PVR_DPF((PVR_DBG_ERROR,"RGXSLCCacheInvalidateRequest: SLC flush and invalidate aborted with error (%u)", eError));
				}
			}
		}
	}

	return eError;
}
/*!
*******************************************************************************
 @Function	_RGXCCBPDumpTransition

 @Description	PDump transition callback for a client CCB. When transitioning
		INTO a capture range (bInto), the simulated FW is first drained
		to the last captured write offset, and, if commands were
		submitted while out of capture range, the CCB control structure
		is fast-forwarded past them so the first command the FW sees is
		the first one inside the new capture range.

 @Input		pvData - the RGX_CLIENT_CCB this callback was registered with
 @Input		bInto - IMG_TRUE when entering a capture range
 @Input		bContinuous - emit the PDump operations with CONTINUOUS flags

 @Return	PVRSRV_ERROR_RETRY while the live FW has not caught up with the
		host write offset (caller waits on the event object and retries),
		otherwise PVRSRV_OK
******************************************************************************/
static PVRSRV_ERROR _RGXCCBPDumpTransition(IMG_PVOID *pvData, IMG_BOOL bInto, IMG_BOOL bContinuous)
{
	RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
	IMG_UINT32 ui32PDumpFlags = bContinuous ? PDUMP_FLAGS_CONTINUOUS:0;

	/*
		We're about to Transition into capture range and we've submitted
		new commands since the last time we entered capture range so drain
		the CCB as required
	*/
	if (bInto)
	{
		volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
		PVRSRV_ERROR eError;

		/*
			Wait for the FW to catch up (retry will get pushed back out services
			client where we wait on the event object and try again later)
		*/
		if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
		{
			return PVRSRV_ERROR_RETRY;
		}

		/*
			We drain whenever capture range is entered. Even if no commands
			have been issued while where out of capture range we have to wait for
			operations that we might have issued in the last capture range to finish
			so the sync prim update that will happen after all the PDumpTransition
			callbacks have been called doesn't clobber syncs which the FW is currently
			working on. Although this is suboptimal, while out of capture range for
			every persistent operation we serialise the PDump script processing and
			the FW, there is no easy solution.
			Not all modules that work on syncs register a PDumpTransition and thus
			we have no way of knowing if we can skip drain and the sync prim dump
			or not.
		*/
		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
							  "cCCB(%s@%p): Draining rgxfw_roff == woff (%d)",
							  psClientCCB->szName,
							  psClientCCB,
							  psClientCCB->ui32LastPDumpWriteOffset);

		/* POL in the PDump script until the simulated FW's read offset
		   reaches the last offset we captured. */
		eError = DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
										offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
										psClientCCB->ui32LastPDumpWriteOffset,
										0xffffffff,
										PDUMP_POLL_OPERATOR_EQUAL,
										ui32PDumpFlags);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_WARNING, "_RGXCCBPDumpTransition: problem pdumping POL for cCCBCtl (%d)", eError));
		}
		PVR_ASSERT(eError == PVRSRV_OK);

		/*
			If new command(s) have been written out of capture range then we
			need to fast forward past uncaptured operations.
		*/
		if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
		{
			/*
				There are commands that where not captured so after the
				simulation drain (above) we also need to fast-forward pass those
				commands so the FW can start with the 1st command which is in
				the new capture range
			*/
			psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
			psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
			psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;

			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
								  "cCCB(%s@%p): Fast-forward from %d to %d",
								  psClientCCB->szName,
								  psClientCCB,
								  psClientCCB->ui32LastPDumpWriteOffset,
								  psClientCCB->ui32HostWriteOffset);

			/* Capture the modified control structure into the PDump stream. */
			DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
							   0,
							   sizeof(RGXFWIF_CCCB_CTL),
							   ui32PDumpFlags);

			/*
				Although we've entered capture range we might not do any work
				on this CCB so update the ui32LastPDumpWriteOffset to reflect
				where we got to for next so we start the drain from where we
				got to last time
			*/
			psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
		}
	}
	return PVRSRV_OK;
}
PVRSRV_ERROR PVRSRVAllocSyncPrimitiveBlockKM(PVRSRV_DEVICE_NODE *psDevNode, SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, IMG_UINT32 *puiSyncPrimVAddr, IMG_UINT32 *puiSyncPrimBlockSize, DEVMEM_EXPORTCOOKIE **psExportCookie) { SYNC_PRIMITIVE_BLOCK *psNewSyncBlk; PVRSRV_ERROR eError; psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK)); if (psNewSyncBlk == IMG_NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto e0; } psNewSyncBlk->psDevNode = psDevNode; PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block"); eError = psDevNode->pfnAllocUFOBlock(psDevNode, &psNewSyncBlk->psMemDesc, puiSyncPrimVAddr, &psNewSyncBlk->ui32BlockSize); if (eError != PVRSRV_OK) { goto e1; } eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc, (IMG_PVOID *) &psNewSyncBlk->pui32LinAddr); if (eError != PVRSRV_OK) { goto e2; } eError = DevmemExport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie); if (eError != PVRSRV_OK) { goto e3; } psNewSyncBlk->ui32RefCount = 0; _SyncPrimitiveBlockRef(psNewSyncBlk); *psExportCookie = &psNewSyncBlk->sExportCookie; *ppsSyncBlk = psNewSyncBlk; *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize; PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocated UFO block (FirmwareVAddr = 0x%08x)", *puiSyncPrimVAddr); return PVRSRV_OK; e3: psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc); e2: DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc); e1: OSFreeMem(psNewSyncBlk); e0: return eError; }
/*
 * SGXReset - soft-reset the SGX core and rebuild its BIF state.
 *
 * Sequence: (optionally) pause the BIF and clear any latched fault
 * (FIX_HW_BRN_23944), soft-reset the core, point the BIF at the dummy
 * reset page directory, then loop resolving any BIF page faults by
 * temporarily mapping the faulting address onto the dummy reset page.
 * Finally the EDM directory-list register is pointed back at the kernel
 * page directory and the soft-reset is released.
 *
 * @psDevInfo:       SGX device info (register base, reset PD/PT/page)
 * @ui32PDUMPFlags:  flags for the PDump capture of the sequence
 */
void SGXReset(struct PVRSRV_SGXDEV_INFO *psDevInfo, u32 ui32PDUMPFlags)
{
	u32 ui32RegVal;
	const u32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;

#ifndef PDUMP
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif

	psDevInfo->ui32NumResets++;

	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");

#if defined(FIX_HW_BRN_23944)
	/* HW workaround: pause the BIF and clear any latched fault before reset. */
	ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
	if (ui32RegVal & ui32BifFaultMask)
	{
		/* Pulse CLEAR_FAULT while paused, then drop back to pause-only. */
		ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
		PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

		ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
		PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
	}
#endif

	/* Assert soft reset. */
	SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	/* Point the BIF at the dummy reset page directory while faults drain. */
	ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);

	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

	/* Release soft reset and invalidate the BIF directory cache. */
	SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

	SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

	/* Drain outstanding BIF page faults: map each faulting address onto the
	   dummy reset page, pulse a soft reset, then unmap and retry. */
	for (;;)
	{
		u32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
		struct IMG_DEV_VIRTADDR sBifFault;
		u32 ui32PDIndex, ui32PTIndex;

		if ((ui32BifIntStat & ui32BifFaultMask) == 0)
			break;

		sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
		PVR_DPF(PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x",
			 ui32BifIntStat, sBifFault.uiAddr);
		ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
		ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;

		/* Re-assert soft reset while the dummy mapping is installed. */
		SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);

		/* Temporarily make the faulting address valid via the reset PT/page. */
		psDevInfo->pui32BIFResetPD[ui32PDIndex] =
			psDevInfo->sBIFResetPTDevPAddr.uiAddr | SGX_MMU_PDE_VALID;
		psDevInfo->pui32BIFResetPT[ui32PTIndex] =
			psDevInfo->sBIFResetPageDevPAddr.uiAddr | SGX_MMU_PTE_VALID;

		/* Acknowledge any pending host events. */
		ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
		ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);

		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

		SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

		SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

		/* Tear the temporary mapping back down before re-checking. */
		psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
		psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
	}

	/* Restore the EDM requestor's directory list to the kernel PD. */
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, SGX_BIF_DIR_LIST_REG_EDM,
		     psDevInfo->sKernelPDDevPAddr.uiAddr);
	PDUMPPDREGWITHFLAGS(SGX_BIF_DIR_LIST_REG_EDM,
			    psDevInfo->sKernelPDDevPAddr.uiAddr, ui32PDUMPFlags,
			    PDUMP_PD_UNIQUETAG);

	SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	PVR_DPF(PVR_DBG_WARNING, "Soft Reset of SGX");
	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	/* Deassert soft reset - the core starts running again. */
	ui32RegVal = 0;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);

	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
}
/*!
*******************************************************************************
 @Function	SGXReset

 @Description	Soft-reset the SGX core and reprogram the BIF. The sequence:
		optionally pause the BIF and clear latched faults
		(FIX_HW_BRN_23944); assert soft reset; program BIF control,
		bank and directory-list registers to a clean state; configure
		the memory arbiter and (if present) the system-level cache;
		point the BIF at the dummy reset page directory and drain any
		outstanding page faults by temporarily mapping each faulting
		address onto the dummy reset page; finally restore the EDM
		requestor's kernel page directory and release soft reset.

 @Input		psDevInfo - SGX device info (register base, reset PD/PT/page,
		kernel PD physical address)
 @Input		ui32PDUMPFlags - flags for the PDump capture of the sequence

 @Return	IMG_VOID
******************************************************************************/
IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_UINT32 ui32PDUMPFlags)
{
	IMG_UINT32 ui32RegVal;
	/* Cores differ in which INT_STAT bit reports a BIF fault. */
#if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK)
	const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK;
#else
	const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK;
#endif

#ifndef PDUMP
	PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags);
#endif

	psDevInfo->ui32NumResets++;

	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n");

#if defined(FIX_HW_BRN_23944)
	/* HW workaround: pause the BIF and clear any latched fault before reset. */
	ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
	if (ui32RegVal & ui32BifFaultMask)
	{
		/* Pulse CLEAR_FAULT while paused, then drop back to pause-only. */
		ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK;
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
		PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

		ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK;
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
		PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);

		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);
	}
#endif

	/* Assert soft reset. */
	SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE);
	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

#if defined(SGX_FEATURE_36BIT_MMU)
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags);
#endif

	/* Zero the BIF control, bank and directory-list registers. */
	ui32RegVal = 0;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
#if defined(SGX_FEATURE_MP)
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_BIF_CTRL, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_MASTER_BIF_CTRL, ui32RegVal, ui32PDUMPFlags);
#endif
#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK_SET, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK_SET, ui32RegVal, ui32PDUMPFlags);
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
#endif

	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal, ui32PDUMPFlags);

#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	{
		IMG_UINT32 ui32DirList, ui32DirListReg;

		/* Clear every remaining directory-list base (bases 0 and 1 are not
		   necessarily contiguous, hence the BASE1-relative addressing). */
		for (ui32DirList = 1;
			 ui32DirList < SGX_FEATURE_BIF_NUM_DIRLISTS;
			 ui32DirList++)
		{
			ui32DirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (ui32DirList - 1);
			OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32DirListReg, ui32RegVal);
			PDUMPREGWITHFLAGS(ui32DirListReg, ui32RegVal, ui32PDUMPFlags);
		}
	}
#endif

#if defined(EUR_CR_BIF_MEM_ARB_CONFIG)
	/* Configure the BIF memory arbiter (page size / best count / TTE
	   threshold). NOTE(review): values are magic tuning constants. */
	ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) |
				 (7UL << EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) |
				 (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT);
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags);
#endif

#if defined(SGX_FEATURE_SYSTEM_CACHE)
#if defined(SGX_FEATURE_MP)
#if defined(SGX_BYPASS_SYSTEM_CACHE)
#error SGX_BYPASS_SYSTEM_CACHE not supported
#else
	/* Set up the master system-level cache (SLC) control and bypass. */
	ui32RegVal = EUR_CR_MASTER_SLC_CTRL_USSE_INVAL_REQ0_MASK |
				 (0xC << EUR_CR_MASTER_SLC_CTRL_ARB_PAGE_SIZE_SHIFT);
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
	PDUMPREG(EUR_CR_MASTER_SLC_CTRL, ui32RegVal);
	ui32RegVal = EUR_CR_MASTER_SLC_CTRL_BYPASS_BYP_CC_MASK;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
	PDUMPREG(EUR_CR_MASTER_SLC_CTRL_BYPASS, ui32RegVal);
#endif
#else
	/* Non-MP cores: configure the MNE cache bypass behaviour. */
#if defined(SGX_BYPASS_SYSTEM_CACHE)
	ui32RegVal = EUR_CR_MNE_CR_CTRL_BYPASS_ALL_MASK;
#else
#if defined(FIX_HW_BRN_26620)
	ui32RegVal = 0;
#else
	ui32RegVal = EUR_CR_MNE_CR_CTRL_BYP_CC_MASK;
#endif
#endif
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MNE_CR_CTRL, ui32RegVal);
	PDUMPREG(EUR_CR_MNE_CR_CTRL, ui32RegVal);
#endif
#endif

	/* Point the BIF at the dummy reset page directory while faults drain. */
	ui32RegVal = psDevInfo->sBIFResetPDDevPAddr.uiAddr;
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal);

	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

	/* Release soft reset and invalidate the BIF directory cache. */
	SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE);
	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

	SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

	/* Drain outstanding BIF page faults: map each faulting address onto the
	   dummy reset page, pulse a soft reset, then unmap and re-check. */
	for (;;)
	{
		IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT);
		IMG_DEV_VIRTADDR sBifFault;
		IMG_UINT32 ui32PDIndex, ui32PTIndex;

		if ((ui32BifIntStat & ui32BifFaultMask) == 0)
		{
			break;
		}

		sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT);
		PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr));
		ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);
		ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT;

		/* Re-assert soft reset while the dummy mapping is installed. */
		SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE);

		/* Temporarily make the faulting address valid via the reset PT/page. */
		psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr
												>>SGX_MMU_PDE_ADDR_ALIGNSHIFT)
												| SGX_MMU_PDE_PAGE_SIZE_4K
												| SGX_MMU_PDE_VALID;
		psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr
												>>SGX_MMU_PTE_ADDR_ALIGNSHIFT)
												| SGX_MMU_PTE_VALID;

		/* Acknowledge any pending host events. */
		ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS);
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal);
		ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2);
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal);

		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

		SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE);
		SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

		SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE);

		/* Tear the temporary mapping back down before re-checking. */
		psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0;
		psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0;
	}

#if defined(SGX_FEATURE_MULTIPLE_MEM_CONTEXTS)
	/* Point bank 0's EDM (and, where present, 2D/TA) requestors at the EDM
	   directory-list index. */
	ui32RegVal = (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_EDM_SHIFT);
#if defined(SGX_FEATURE_2D_HARDWARE)
	ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_2D_SHIFT);
#endif
#if defined(FIX_HW_BRN_23410)
	/* Workaround: TA must also point at a guaranteed-valid context. */
	ui32RegVal |= (SGX_BIF_DIR_LIST_INDEX_EDM << EUR_CR_BIF_BANK0_INDEX_TA_SHIFT);
#endif
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_BANK0, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_BANK0, ui32RegVal, ui32PDUMPFlags);
#endif

	{
		IMG_UINT32 ui32EDMDirListReg;

		/* Set up the EDM context with the kernel page directory. */
#if (SGX_BIF_DIR_LIST_INDEX_EDM == 0)
		ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE0;
#else
		/* Bases 0 and 1 are not necessarily contiguous. */
		ui32EDMDirListReg = EUR_CR_BIF_DIR_LIST_BASE1 + 4 * (SGX_BIF_DIR_LIST_INDEX_EDM - 1);
#endif

#if defined(FIX_HW_BRN_28011)
		/* Workaround: base 0 must also hold a valid (kernel) PD. */
		OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
		PDUMPPDREGWITHFLAGS(EUR_CR_BIF_DIR_LIST_BASE0, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
#endif

		OSWriteHWReg(psDevInfo->pvRegsBaseKM, ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT);
		PDUMPPDREGWITHFLAGS(ui32EDMDirListReg, psDevInfo->sKernelPDDevPAddr.uiAddr>>SGX_MMU_PDE_ADDR_ALIGNSHIFT, ui32PDUMPFlags, PDUMP_PD_UNIQUETAG);
	}

#ifdef SGX_FEATURE_2D_HARDWARE
	/* Compile-time check that the 2D heap base fits the register field. */
#if ((SGX_2D_HEAP_BASE & ~EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK) != 0)
#error "SGXReset: SGX_2D_HEAP_BASE doesn't match EUR_CR_BIF_TWOD_REQ_BASE_ADDR_MASK alignment"
#endif
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE);
	PDUMPREGWITHFLAGS(EUR_CR_BIF_TWOD_REQ_BASE, SGX_2D_HEAP_BASE, ui32PDUMPFlags);
#endif

	SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	PVR_DPF((PVR_DBG_MESSAGE,"Soft Reset of SGX"));
	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	/* Deassert soft reset (master core first on MP) - SGX starts running. */
	ui32RegVal = 0;
#if defined(SGX_FEATURE_MP)
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_MASTER_SOFT_RESET, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_MASTER_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);
#endif
	OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_SOFT_RESET, ui32RegVal);
	PDUMPREGWITHFLAGS(EUR_CR_SOFT_RESET, ui32RegVal, ui32PDUMPFlags);

	SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE);

	PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "End of SGX reset sequence\r\n");
}
/* * PVRSRVSubmitTQ3DKickKM */ IMG_EXPORT PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, IMG_UINT32 ui32PrepareCount, IMG_UINT32 *paui32ClientFenceCount, PRGXFWIF_UFO_ADDR **papauiClientFenceUFOAddress, IMG_UINT32 **papaui32ClientFenceValue, IMG_UINT32 *paui32ClientUpdateCount, PRGXFWIF_UFO_ADDR **papauiClientUpdateUFOAddress, IMG_UINT32 **papaui32ClientUpdateValue, IMG_UINT32 *paui32ServerSyncCount, IMG_UINT32 **papaui32ServerSyncFlags, SERVER_SYNC_PRIMITIVE ***papapsServerSyncs, IMG_UINT32 ui32NumFenceFDs, IMG_INT32 *paui32FenceFDs, IMG_UINT32 *paui32FWCommandSize, IMG_UINT8 **papaui8FWCommand, IMG_UINT32 *pui32TQPrepareFlags) { PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; #if defined(WDDM) RGX_CCB_CMD_HELPER_DATA as3DCmdHelper[TQ_MAX_PREPARES_PER_SUBMIT]; RGX_CCB_CMD_HELPER_DATA as2DCmdHelper[TQ_MAX_PREPARES_PER_SUBMIT]; #endif RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper; RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; IMG_UINT32 ui323DCmdCount = 0; IMG_UINT32 ui322DCmdCount = 0; IMG_BOOL bKick2D = IMG_FALSE; IMG_BOOL bKick3D = IMG_FALSE; IMG_BOOL bPDumpContinuous = IMG_FALSE; IMG_UINT32 i; IMG_UINT32 ui32IntClientFenceCount = 0; PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = IMG_NULL; IMG_UINT32 *paui32IntFenceValue = IMG_NULL; IMG_UINT32 ui32IntClientUpdateCount = 0; PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = IMG_NULL; IMG_UINT32 *paui32IntUpdateValue = IMG_NULL; PVRSRV_ERROR eError; PVRSRV_ERROR eError2; #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) IMG_BOOL bSyncsMerged = IMG_FALSE; #endif if (ui32PrepareCount == 0) { return PVRSRV_ERROR_INVALID_PARAMS; } if (ui32NumFenceFDs != 0) { #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) /* Fence FD's are only valid in the 3D case with no batching */ if ((ui32PrepareCount !=1) && (!TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[0], 3D))) { return PVRSRV_ERROR_INVALID_PARAMS; } #else /* We only support Fence FD's if built with PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC */ return 
PVRSRV_ERROR_INVALID_PARAMS; #endif } #if defined(WDDM) pas3DCmdHelper = &as3DCmdHelper; pas2DCmdHelper = &as2DCmdHelper; #else /* We can't allocate the required amount of stack space on all consumer architectures */ pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount); if (pas3DCmdHelper == IMG_NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto fail_alloc3dhelper; } pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount); if (pas2DCmdHelper == IMG_NULL) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto fail_alloc2dhelper; } #endif /* Ensure we do the right thing for server syncs which cross call bounderies */ for (i=0;i<ui32PrepareCount;i++) { IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START; IMG_BOOL bHaveEndPrepare = IMG_FALSE; if (bHaveStartPrepare) { IMG_UINT32 k; /* We've at the start of a transfer operation (which might be made up of multiple HW operations) so check if we also have then end of the transfer operation in the batch */ for (k=i;k<ui32PrepareCount;k++) { if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END) { bHaveEndPrepare = IMG_TRUE; break; } } if (!bHaveEndPrepare) { /* We don't have the complete command passed in this call so drop the update request. When we get called again with the last HW command in this transfer operation we'll do the update at that point. 
*/ for (k=0;k<paui32ServerSyncCount[i];k++) { papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE; } } } } /* Init the command helper commands for all the prepares */ for (i=0;i<ui32PrepareCount;i++) { RGX_CLIENT_CCB *psClientCCB; RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx; IMG_CHAR *pszCommandName; RGX_CCB_CMD_HELPER_DATA *psCmdHelper; RGXFWIF_CCB_CMD_TYPE eType; if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D)) { psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext; psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); pszCommandName = "TQ-3D"; psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++]; eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D; } else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D)) { psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext; psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx); pszCommandName = "TQ-2D"; psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++]; eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D; } else { eError = PVRSRV_ERROR_INVALID_PARAMS; goto fail_cmdtype; } if (i == 0) { bPDumpContinuous = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS); PDUMPCOMMENTWITHFLAGS((bPDumpContinuous) ? 
PDUMP_FLAGS_CONTINUOUS : 0, "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr); } else { IMG_BOOL bNewPDumpContinuous = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) == TQ_PREP_FLAGS_PDUMPCONTINUOUS); if (bNewPDumpContinuous != bPDumpContinuous) { eError = PVRSRV_ERROR_INVALID_PARAMS; PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __FUNCTION__)); goto fail_pdumpcheck; } } #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) if (ui32NumFenceFDs) { IMG_UINT32 ui32NumFenceSyncs; PRGXFWIF_UFO_ADDR *puiFenceFWAddrs; IMG_UINT32 *pui32FenceValues; IMG_UINT32 ui32NumUpdateSyncs; PRGXFWIF_UFO_ADDR *puiUpdateFWAddrs; IMG_UINT32 *pui32UpdateValues; /* FIXME: We can't be taking the server sync operations here as we have no way to undo them should the acquire fail. If client/local syncs where used here would that remove the issue? */ eError = PVRFDSyncQueryFencesKM(ui32NumFenceFDs, paui32FenceFDs, IMG_TRUE, &ui32NumFenceSyncs, &puiFenceFWAddrs, &pui32FenceValues, &ui32NumUpdateSyncs, &puiUpdateFWAddrs, &pui32UpdateValues); if (eError != PVRSRV_OK) { goto fail_fdsync; } /* Merge the Android syncs and the client syncs together */ ui32IntClientFenceCount = paui32ClientFenceCount[i] + ui32NumFenceSyncs; pauiIntFenceUFOAddress = OSAllocMem(sizeof(*pauiIntFenceUFOAddress)* ui32IntClientFenceCount); if (pauiIntFenceUFOAddress == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); goto fail_fenceUFOarray; } paui32IntFenceValue = OSAllocMem(sizeof(*paui32IntFenceValue)* ui32IntClientFenceCount); if (paui32IntFenceValue == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); OSFreeMem(pauiIntFenceUFOAddress); 
goto fail_fencevaluearray; } ui32IntClientUpdateCount = paui32ClientUpdateCount[i] + ui32NumUpdateSyncs; pauiIntUpdateUFOAddress = OSAllocMem(sizeof(*pauiIntUpdateUFOAddress)* ui32IntClientUpdateCount); if (pauiIntUpdateUFOAddress == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); goto fail_updateUFOarray; } paui32IntUpdateValue = OSAllocMem(sizeof(*paui32IntUpdateValue)* ui32IntClientUpdateCount); if (paui32IntUpdateValue == IMG_NULL) { /* Free memory created by PVRFDSyncQueryFencesKM */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); OSFreeMem(pauiIntUpdateUFOAddress); goto fail_updatevaluearray; } SYNC_MERGE_CLIENT_FENCES(ui32IntClientFenceCount, pauiIntFenceUFOAddress, paui32IntFenceValue, ui32NumFenceSyncs, puiFenceFWAddrs, pui32FenceValues, paui32ClientFenceCount[i], papauiClientFenceUFOAddress[i], papaui32ClientFenceValue[i]); SYNC_MERGE_CLIENT_UPDATES(ui32IntClientUpdateCount, pauiIntUpdateUFOAddress, paui32IntUpdateValue, ui32NumUpdateSyncs, puiUpdateFWAddrs, pui32UpdateValues, paui32ClientUpdateCount[i], papauiClientUpdateUFOAddress[i], papaui32ClientUpdateValue[i]); if (ui32NumFenceSyncs || ui32NumUpdateSyncs) { PDUMPCOMMENT("(TQ) Android native fences in use: %u fence syncs, %u update syncs", ui32NumFenceSyncs, ui32NumUpdateSyncs); } /* Free the data created by PVRFDSyncQueryFencesKM as it has now been merged into *IntClient* */ OSFreeMem(puiFenceFWAddrs); OSFreeMem(pui32FenceValues); OSFreeMem(puiUpdateFWAddrs); OSFreeMem(pui32UpdateValues); bSyncsMerged = IMG_TRUE; } else #endif /* PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC */ { /* No client sync merging so just copy across the pointers */ ui32IntClientFenceCount = 
paui32ClientFenceCount[i]; pauiIntFenceUFOAddress = papauiClientFenceUFOAddress[i]; paui32IntFenceValue = papaui32ClientFenceValue[i]; ui32IntClientUpdateCount = paui32ClientUpdateCount[i]; pauiIntUpdateUFOAddress = papauiClientUpdateUFOAddress[i]; paui32IntUpdateValue = papaui32ClientUpdateValue[i]; } /* Create the command helper data for this command */ eError = RGXCmdHelperInitCmdCCB(psClientCCB, ui32IntClientFenceCount, pauiIntFenceUFOAddress, paui32IntFenceValue, ui32IntClientUpdateCount, pauiIntUpdateUFOAddress, paui32IntUpdateValue, paui32ServerSyncCount[i], papaui32ServerSyncFlags[i], papapsServerSyncs[i], paui32FWCommandSize[i], papaui8FWCommand[i], eType, bPDumpContinuous, pszCommandName, psCmdHelper); if (eError != PVRSRV_OK) { goto fail_initcmd; } } /* Acquire space for all the commands in one go */ if (ui323DCmdCount) { eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, &pas3DCmdHelper[0], &bKick3D); if (eError != PVRSRV_OK) { if (bKick3D) { ui323DCmdCount = 0; ui322DCmdCount = 0; } else { goto fail_3dcmdacquire; } } } if (ui322DCmdCount) { eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount, &pas2DCmdHelper[0], &bKick2D); if (eError != PVRSRV_OK) { if (bKick2D || bKick3D) { ui323DCmdCount = 0; ui322DCmdCount = 0; } else { goto fail_2dcmdacquire; } } } /* We should acquire the kernel CCB(s) space here as the schedule could fail and we would have to roll back all the syncs */ /* Only do the command helper release (which takes the server sync operations if the acquire succeeded */ if (ui323DCmdCount) { RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, &pas3DCmdHelper[0], "TQ_3D", FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr); } if (ui322DCmdCount) { RGXCmdHelperReleaseCmdCCB(ui322DCmdCount, &pas2DCmdHelper[0], "TQ_2D", FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr); } /* Even if we failed to acquire the client CCB space we might still need to kick the HW to process a padding packet 
to release space for us next time round */ if (bKick3D) { RGXFWIF_KCCB_CMD s3DKCCBCmd; /* Construct the kernel 3D CCB command. */ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext); s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext)); s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) { eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, RGXFWIF_DM_3D, &s3DKCCBCmd, sizeof(s3DKCCBCmd), bPDumpContinuous); if (eError2 != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); } END_LOOP_UNTIL_TIMEOUT(); } if (bKick2D) { RGXFWIF_KCCB_CMD s2DKCCBCmd; /* Construct the kernel 2D CCB command. */ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext); s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext)); s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) { eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, RGXFWIF_DM_2D, &s2DKCCBCmd, sizeof(s2DKCCBCmd), bPDumpContinuous); if (eError2 != PVRSRV_ERROR_RETRY) { break; } OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); } END_LOOP_UNTIL_TIMEOUT(); } /* * Now check eError (which may have returned an error from our earlier calls * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first * so we check it now... 
*/ if (eError != PVRSRV_OK ) { goto fail_2dcmdacquire; } #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) /* Free the merged sync memory if required */ if (bSyncsMerged) { OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); OSFreeMem(pauiIntUpdateUFOAddress); OSFreeMem(paui32IntUpdateValue); } #if defined(NO_HARDWARE) for (i = 0; i < ui32NumFenceFDs; i++) { eError = PVRFDSyncNoHwUpdateFenceKM(paui32FenceFDs[i]); if (eError != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "%s: Failed nohw update on fence fd=%d (%s)", __func__, paui32FenceFDs[i], PVRSRVGetErrorStringKM(eError))); } } #endif #endif #if !defined(WDDM) OSFreeMem(pas2DCmdHelper); OSFreeMem(pas3DCmdHelper); #endif return PVRSRV_OK; /* No resources are created in this function so there is nothing to free unless we had to merge syncs. If we fail after the client CCB acquire there is still nothing to do as only the client CCB release will modify the client CCB */ fail_2dcmdacquire: fail_3dcmdacquire: fail_initcmd: #if defined(PVR_ANDROID_NATIVE_WINDOW_HAS_SYNC) if (bSyncsMerged) { OSFreeMem(pauiIntFenceUFOAddress); OSFreeMem(paui32IntFenceValue); OSFreeMem(pauiIntUpdateUFOAddress); OSFreeMem(paui32IntUpdateValue); } fail_updatevaluearray: fail_updateUFOarray: fail_fencevaluearray: fail_fenceUFOarray: fail_fdsync: #endif fail_pdumpcheck: fail_cmdtype: PVR_ASSERT(eError != PVRSRV_OK); #if !defined(WDDM) OSFreeMem(pas2DCmdHelper); fail_alloc2dhelper: OSFreeMem(pas3DCmdHelper); fail_alloc3dhelper: #endif return eError; }
IMG_VOID SGXReset(PVRSRV_SGXDEV_INFO *psDevInfo, IMG_BOOL bHardwareRecovery, IMG_UINT32 ui32PDUMPFlags) #if !defined(SGX_FEATURE_MP) { IMG_UINT32 ui32RegVal; #if defined(EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK) const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_REQ_MASK; #else const IMG_UINT32 ui32BifFaultMask = EUR_CR_BIF_INT_STAT_FAULT_MASK; #endif #if !defined(PDUMP) PVR_UNREFERENCED_PARAMETER(ui32PDUMPFlags); #endif PDUMPCOMMENTWITHFLAGS(ui32PDUMPFlags, "Start of SGX reset sequence\r\n"); #if defined(FIX_HW_BRN_23944) ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); if (ui32RegVal & ui32BifFaultMask) { ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK | EUR_CR_BIF_CTRL_CLEAR_FAULT_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); ui32RegVal = EUR_CR_BIF_CTRL_PAUSE_MASK; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_CTRL, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_CTRL, ui32RegVal, ui32PDUMPFlags); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); } #endif SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_TRUE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_TRUE); #if defined(SGX_FEATURE_36BIT_MMU) OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_36BIT_ADDRESSING, EUR_CR_BIF_36BIT_ADDRESSING_ENABLE_MASK, ui32PDUMPFlags); #endif SGXResetInitBIFContexts(psDevInfo, ui32PDUMPFlags); #if defined(EUR_CR_BIF_MEM_ARB_CONFIG) ui32RegVal = (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_PAGE_SIZE_SHIFT) | (7UL << 
EUR_CR_BIF_MEM_ARB_CONFIG_BEST_CNT_SHIFT) | (12UL << EUR_CR_BIF_MEM_ARB_CONFIG_TTE_THRESH_SHIFT); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal); PDUMPREGWITHFLAGS(SGX_PDUMPREG_NAME, EUR_CR_BIF_MEM_ARB_CONFIG, ui32RegVal, ui32PDUMPFlags); #endif #if defined(SGX_FEATURE_SYSTEM_CACHE) #if defined(SGX_BYPASS_SYSTEM_CACHE) ui32RegVal = MNE_CR_CTRL_BYPASS_ALL_MASK; #else #if defined(FIX_HW_BRN_26620) ui32RegVal = 0; #else ui32RegVal = MNE_CR_CTRL_BYP_CC_MASK; #endif #if defined(FIX_HW_BRN_34028) ui32RegVal |= (8 << MNE_CR_CTRL_BYPASS_SHIFT); #endif #endif OSWriteHWReg(psDevInfo->pvRegsBaseKM, MNE_CR_CTRL, ui32RegVal); PDUMPREG(SGX_PDUMPREG_NAME, MNE_CR_CTRL, ui32RegVal); #endif if (bHardwareRecovery) { ui32RegVal = (IMG_UINT32)psDevInfo->sBIFResetPDDevPAddr.uiAddr; OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_DIR_LIST_BASE0, ui32RegVal); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_TRUE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); for (;;) { IMG_UINT32 ui32BifIntStat = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_INT_STAT); IMG_DEV_VIRTADDR sBifFault; IMG_UINT32 ui32PDIndex, ui32PTIndex; if ((ui32BifIntStat & ui32BifFaultMask) == 0) { break; } sBifFault.uiAddr = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_BIF_FAULT); PVR_DPF((PVR_DBG_WARNING, "SGXReset: Page fault 0x%x/0x%x", ui32BifIntStat, sBifFault.uiAddr)); ui32PDIndex = sBifFault.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT); ui32PTIndex = (sBifFault.uiAddr & SGX_MMU_PT_MASK) >> SGX_MMU_PAGE_SHIFT; SGXResetSoftReset(psDevInfo, IMG_TRUE, ui32PDUMPFlags, IMG_FALSE); psDevInfo->pui32BIFResetPD[ui32PDIndex] = (psDevInfo->sBIFResetPTDevPAddr.uiAddr >>SGX_MMU_PDE_ADDR_ALIGNSHIFT) | SGX_MMU_PDE_PAGE_SIZE_4K | SGX_MMU_PDE_VALID; psDevInfo->pui32BIFResetPT[ui32PTIndex] = (psDevInfo->sBIFResetPageDevPAddr.uiAddr >>SGX_MMU_PTE_ADDR_ALIGNSHIFT) | 
SGX_MMU_PTE_VALID; ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR, ui32RegVal); ui32RegVal = OSReadHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_STATUS2); OSWriteHWReg(psDevInfo->pvRegsBaseKM, EUR_CR_EVENT_HOST_CLEAR2, ui32RegVal); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetSoftReset(psDevInfo, IMG_FALSE, ui32PDUMPFlags, IMG_FALSE); SGXResetSleep(psDevInfo, ui32PDUMPFlags, IMG_FALSE); SGXResetInvalDC(psDevInfo, ui32PDUMPFlags, IMG_FALSE); psDevInfo->pui32BIFResetPD[ui32PDIndex] = 0; psDevInfo->pui32BIFResetPT[ui32PTIndex] = 0; } } else {