int sec_custom_threshold_set() { int i; if ((16 > sgx_dvfs_custom_threshold_size) && (custom_threshold_change == 1)) { PVR_LOG(("Error, custom_threshold element not enough[%d]!!", sgx_dvfs_custom_threshold_size)); custom_threshold_change = 0; return -1; } for (i = 0; i < GPU_DVFS_MAX_LEVEL; i++) { if (custom_threshold_change == 1) { g_gpu_dvfs_data[i].min_threadhold = custom_threshold[i * 4]; g_gpu_dvfs_data[i].max_threadhold = custom_threshold[i * 4 + 1]; g_gpu_dvfs_data[i].quick_down_threadhold = custom_threshold[i * 4 + 2]; g_gpu_dvfs_data[i].quick_up_threadhold = custom_threshold[i * 4 + 3]; PVR_LOG(("set custom_threshold level[%d] min[%d],max[%d],q_min[%d],q_max[%d]", i, g_gpu_dvfs_data[i].min_threadhold, g_gpu_dvfs_data[i].max_threadhold, g_gpu_dvfs_data[i].quick_down_threadhold, g_gpu_dvfs_data[i].quick_up_threadhold)); } else { g_gpu_dvfs_data[i].min_threadhold = default_dvfs_data[i].min_threadhold; g_gpu_dvfs_data[i].max_threadhold = default_dvfs_data[i].max_threadhold; g_gpu_dvfs_data[i].quick_down_threadhold = default_dvfs_data[i].quick_down_threadhold; g_gpu_dvfs_data[i].quick_up_threadhold = default_dvfs_data[i].quick_up_threadhold; PVR_LOG(("set threshold value restore level[%d] min[%d],max[%d],q_min[%d],q_max[%d]", i, g_gpu_dvfs_data[i].min_threadhold, g_gpu_dvfs_data[i].max_threadhold, g_gpu_dvfs_data[i].quick_down_threadhold, g_gpu_dvfs_data[i].quick_up_threadhold)); } } custom_threshold_change = 0; return 1; }
IMG_INTERNAL void PVRSRVDumpRefCountCCB(void) { int i; PVRSRV_LOCK_CCB(); PVR_LOG(("%s", gszHeader)); for(i = 0; i < PVRSRV_REFCOUNT_CCB_MAX; i++) { PVRSRV_REFCOUNT_CCB *psRefCountCCBEntry = &gsRefCountCCB[(giOffset + i) % PVRSRV_REFCOUNT_CCB_MAX]; /* Early on, we won't have MAX_REFCOUNT_CCB_SIZE messages */ if(!psRefCountCCBEntry->pszFile) break; PVR_LOG(("%s %d %s:%d", psRefCountCCBEntry->pcMesg, psRefCountCCBEntry->ui32PID, psRefCountCCBEntry->pszFile, psRefCountCCBEntry->iLine)); } PVRSRV_UNLOCK_CCB(); }
/* this is for power gating */
/*
 * gpu_power_enable - take a runtime-PM reference on the GPU device and
 * poll until the power domain is actually up.
 *
 * Returns 0 on success (or when CONFIG_PM_RUNTIME is disabled), or the
 * pm_runtime_get_sync() error code on failure.
 */
int gpu_power_enable(void)
{
#ifdef CONFIG_PM_RUNTIME
	int err;
	int try_count = 50;	/* max poll iterations before complaining */

	err = pm_runtime_get_sync(&gpsPVRLDMDev->dev);
	/* NOTE(review): pm_runtime_get_sync() may return a positive value on
	 * success (device already active); the suspended check below appears
	 * to filter that case so only genuine failures bail out — confirm. */
	if (err && pm_runtime_suspended(&gpsPVRLDMDev->dev)) {
		PVR_DPF((PVR_DBG_ERROR, "Error in pm_runtime_get_sync"));
		return err;
	}

	do {
		/* wait for gpu power turned on */
		if (!pm_runtime_suspended(&gpsPVRLDMDev->dev))
			break;

		/* Budget exhausted: log once; the while condition exits next */
		if (try_count == 0)
			PVR_LOG(("enable_gpu_power on fail with pm_runtime_suspended"));

		/* yield the CPU between polls */
		schedule();
	} while (try_count--);

	/*this is debug for runtimepm power gating state*/
	{
		void __iomem *status;

		/* 0x4080: PMU power-status register offset — TODO confirm against
		 * the Exynos PMU register map. */
		status = EXYNOS_PMUREG(0x4080);
		sgx_gpu_power_state = __raw_readl(status);
#ifdef PM_RUNTIME_DEBUG
		PVR_LOG(("enable_gpu_power: read register: 0x%x", sgx_gpu_power_state));
#endif
	}
#endif
	return 0;
}
/* PVRSRVHWOpTimeoutKM */
/*
 * Called when a hardware operation times out. Optionally panics the OS
 * (PVRSRV_RESET_ON_HWTIMEOUT builds); otherwise dumps low-verbosity
 * server debug state and returns.
 */
PVRSRV_ERROR PVRSRVHWOpTimeoutKM(IMG_VOID)
{
#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	PVR_LOG(("User requested OS reset"));
	OSPanic();
#endif

	PVR_LOG(("HW operation timeout, dump server info"));
	PVRSRVDebugRequest(DEBUG_REQUEST_VERBOSITY_LOW, IMG_NULL);

	return PVRSRV_OK;
}
/*!
******************************************************************************
 @Function	PVRSRVDumpTimeTraceBuffer

 @Description	Dump the contents of the trace buffer.

 @Input		hKey : PID owning the buffer (printed in the banner line)
 @Input		hData : Pointer to the sTimeTraceBuffer to dump

 @Return	Error
******************************************************************************/
static PVRSRV_ERROR PVRSRVDumpTimeTraceBuffer(IMG_UINTPTR_T hKey, IMG_UINTPTR_T hData)
{
	sTimeTraceBuffer *psBuffer = (sTimeTraceBuffer *) hData;
	IMG_UINT32 ui32ByteCount = psBuffer->ui32ByteCount;	/* bytes left to dump */
	IMG_UINT32 ui32Walker = psBuffer->ui32Roff;		/* read offset in ring */
	IMG_UINT32 ui32Read, ui32LineLen, ui32EOL, ui32MinLine;

	PVR_LOG(("TTB for PID %u:", (IMG_UINT32) hKey));

	/* NOTE(review): assumes ui32ByteCount is a multiple of
	 * sizeof(IMG_UINT32) (otherwise the subtraction below underflows) and
	 * that TIME_TRACE_BUFFER_SIZE is a power of two (wrap uses a mask) —
	 * confirm with the producer side. */
	while (ui32ByteCount)
	{
		IMG_UINT32 *pui32Buffer = (IMG_UINT32 *) &psBuffer->pui8Data[ui32Walker];

		/* Words remaining overall vs. words before the ring wraps */
		ui32LineLen = (ui32ByteCount/sizeof(IMG_UINT32));
		ui32EOL = (TIME_TRACE_BUFFER_SIZE - ui32Walker)/sizeof(IMG_UINT32);
		ui32MinLine = (ui32LineLen < ui32EOL)?ui32LineLen:ui32EOL;

		/* Emit up to 4 words per log line, fewer near the wrap point */
		if (ui32MinLine >= 4)
		{
			PVR_LOG(("\t(TTB-%X) %08X %08X %08X %08X", ui32ByteCount,
					pui32Buffer[0], pui32Buffer[1], pui32Buffer[2], pui32Buffer[3]));
			ui32Read = 4 * sizeof(IMG_UINT32);
		}
		else if (ui32MinLine >= 3)
		{
			PVR_LOG(("\t(TTB-%X) %08X %08X %08X", ui32ByteCount,
					pui32Buffer[0], pui32Buffer[1], pui32Buffer[2]));
			ui32Read = 3 * sizeof(IMG_UINT32);
		}
		else if (ui32MinLine >= 2)
		{
			PVR_LOG(("\t(TTB-%X) %08X %08X", ui32ByteCount,
					pui32Buffer[0], pui32Buffer[1]));
			ui32Read = 2 * sizeof(IMG_UINT32);
		}
		else
		{
			PVR_LOG(("\t(TTB-%X) %08X", ui32ByteCount, pui32Buffer[0]));
			ui32Read = sizeof(IMG_UINT32);
		}

		/* Advance and wrap the read offset (power-of-two mask) */
		ui32Walker = (ui32Walker + ui32Read) & (TIME_TRACE_BUFFER_SIZE - 1);
		ui32ByteCount -= ui32Read;
	}

	return PVRSRV_OK;
}
/*
 * Destroy a server-side compute context once the firmware has released it.
 * Returns PVRSRV_ERROR_RETRY while FW cleanup is still outstanding — the
 * caller is expected to call again later. Any other cleanup error is
 * logged but the host-side resources are freed regardless.
 */
IMG_EXPORT PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
{
	PVRSRV_ERROR eErr;

	/* Check if the FW has finished with this resource ... */
	eErr = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
			FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext),
			psComputeContext->psSync,
			RGXFWIF_DM_CDM);

	if (eErr == PVRSRV_ERROR_RETRY)
	{
		return eErr;
	}

	if (eErr != PVRSRV_OK)
	{
		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
				__FUNCTION__,
				PVRSRVGetErrorStringKM(eErr)));
	}

	/* ... it has so we can free its resources */
	dllist_remove_node(&(psComputeContext->sListNode));
	FWCommonContextFree(psComputeContext->psServerCommonContext);
	DevmemFwFree(psComputeContext->psFWFrameworkMemDesc);
	DevmemFwFree(psComputeContext->psFWComputeContextStateMemDesc);
	SyncPrimFree(psComputeContext->psSync);
	OSFreeMem(psComputeContext);

	return PVRSRV_OK;
}
/*
 * Tear down the 3D half of a transfer-queue context once the firmware is
 * done with it. Returns PVRSRV_ERROR_RETRY while FW cleanup is pending;
 * other cleanup errors are logged but resources are freed anyway.
 */
static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
											  PVRSRV_DEVICE_NODE *psDeviceNode,
											  PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
{
	PVRSRV_ERROR eErr;

	/* Check if the FW has finished with this resource ... */
	eErr = RGXFWRequestCommonContextCleanUp(psDeviceNode,
			FWCommonContextGetFWAddress(ps3DData->psServerCommonContext),
			psCleanupSync,
			RGXFWIF_DM_3D);

	if (eErr == PVRSRV_ERROR_RETRY)
	{
		return eErr;
	}

	if (eErr != PVRSRV_OK)
	{
		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
				__FUNCTION__,
				PVRSRVGetErrorStringKM(eErr)));
	}

	/* ... it has so we can free its resources */
	DevmemFwFree(ps3DData->psFWContextStateMemDesc);
	FWCommonContextFree(ps3DData->psServerCommonContext);

	return PVRSRV_OK;
}
int sec_clock_change_down(int level, int step) { sgx_dvfs_down_requirement--; if (sgx_dvfs_down_requirement > 0 ) return level; level += step; if (level > GPU_DVFS_MAX_LEVEL - 1) level = GPU_DVFS_MAX_LEVEL - 1; if (sgx_dvfs_min_lock) { if (level > custom_min_lock_level) level = custom_min_lock_level; } sgx_dvfs_down_requirement = gdata[level].stay_total_count; sec_gpu_vol_clk_change(gdata[level].clock, gdata[level].voltage); if ((g_debug_CCB_Info_Flag % g_debug_CCB_count) == 0) PVR_LOG(("SGX CCB RO : %d, WO : %d, Total : %d", *g_debug_CCB_Info_RO, *g_debug_CCB_Info_WO, g_debug_CCB_Info_WCNT)); g_debug_CCB_Info_WCNT = 0; g_debug_CCB_Info_Flag ++; return level; }
/*
 * RGXCheckFaultAddress - walk all registered memory contexts (and finally
 * the kernel MMU context) looking for the page-catalogue whose physical
 * address matches the faulting one, and dump fault details for it.
 */
IMG_VOID RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, IMG_DEV_PHYADDR *psDevPAddr)
{
	RGX_FAULT_DATA sFaultData;
	IMG_DEV_PHYADDR sPCDevPAddr;

	sFaultData.psDevVAddr = psDevVAddr;
	sFaultData.psDevPAddr = psDevPAddr;

	OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);

	dllist_foreach_node(&psDevInfo->sMemoryContextList, _RGXCheckFaultAddress, &sFaultData);

	/* Lastly check for fault in the kernel allocated memory.
	 * Only compare when the acquire succeeded: on failure sPCDevPAddr is
	 * uninitialised and comparing it would read indeterminate data. */
	if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
	{
		PVR_LOG(("Failed to get PC address for kernel memory context"));
	}
	else if (sFaultData.psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
	{
		MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr);
	}

	OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
}
void sec_gpu_dvfs_handler(int utilization_value) { /*utilization_value is zero mean is gpu going to idle*/ if (utilization_value == 0) return; sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get()); /* this check for current clock must be find in dvfs table */ if (sgx_dvfs_level < 0) { PVR_LOG(("WARN: current clock: %d MHz not found in DVFS table. so set to max clock", gpu_clock_get())); sec_gpu_vol_clk_change(gdata[BASE_START_LEVEL].clock, gdata[BASE_START_LEVEL].voltage); return; } PVR_DPF((PVR_DBG_MESSAGE, "INFO: AUTO DVFS [%d MHz] <%d, %d>, utilization [%d]", gpu_clock_get(), gdata[sgx_dvfs_level].min_threadhold, gdata[sgx_dvfs_level].max_threadhold, utilization_value)); /* check current level's threadhold value */ if (gdata[sgx_dvfs_level].min_threadhold > utilization_value) { /* need to down current clock */ sgx_dvfs_level = sec_clock_change_down(sgx_dvfs_level, BASE_DOWN_STEP_LEVEL); } else if (gdata[sgx_dvfs_level].max_threadhold < utilization_value) { /* need to up current clock */ sgx_dvfs_level = sec_clock_change_up(sgx_dvfs_level, BASE_UP_STEP_LEVEL); } else sgx_dvfs_down_requirement = gdata[sgx_dvfs_level].stay_total_count; g_g3dfreq = gdata[sgx_dvfs_level].clock; }
/*
 * sec_gpu_dvfs_init - build the runtime DVFS table (gdata) from the
 * compile-time defaults, resolve per-level voltages, and create the sysfs
 * attributes used to control and inspect DVFS.
 */
void sec_gpu_dvfs_init(void)
{
	struct platform_device *pdev;
	int i = 0;
	ssize_t total = 0, offset = 0;

	memset(gdata, 0x00, sizeof(struct gpu_dvfs_data)*MAX_DVFS_LEVEL);

	/* Copy each default level and look up its operating voltage */
	for (i = 0; i < GPU_DVFS_MAX_LEVEL; i++) {
		gdata[i].level = default_dvfs_data[i].level;
		gdata[i].clock = default_dvfs_data[i].clock;
		/* clock * 1000 — presumably MHz -> kHz for get_match_volt();
		 * TODO confirm the unit it expects */
		gdata[i].voltage = get_match_volt(ID_G3D, default_dvfs_data[i].clock * 1000);
		gdata[i].clock_source = default_dvfs_data[i].clock_source;
		gdata[i].min_threadhold = default_dvfs_data[i].min_threadhold;
		gdata[i].max_threadhold = default_dvfs_data[i].max_threadhold;
		gdata[i].quick_down_threadhold = default_dvfs_data[i].quick_down_threadhold;
		gdata[i].quick_up_threadhold = default_dvfs_data[i].quick_up_threadhold;
		gdata[i].stay_total_count = default_dvfs_data[i].stay_total_count;
		gdata[i].mask = setmask(default_dvfs_data[i].level, default_dvfs_data[i].clock);
		PVR_LOG(("G3D DVFS Info: Level:%d, Clock:%d MHz, Voltage:%d uV", gdata[i].level, gdata[i].clock, gdata[i].voltage));
	}

	/* default dvfs level depend on default clock setting */
	sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get());
	sgx_dvfs_down_requirement = DOWN_REQUIREMENT_THRESHOLD;
	pdev = gpsPVRLDMDev;

	/* Required name attribute */
	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_min_lock) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_min_lock fail"));
	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_max_lock) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_max_lock fail"));
	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_volt_table) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_volt_table fail"));

	/* Generate DVFS table list*/
	/* NOTE(review): unbounded sprintf — assumes sgx_dvfs_table_string can
	 * hold GPU_DVFS_MAX_LEVEL lines; verify the buffer size. */
	for( i = 0; i < GPU_DVFS_MAX_LEVEL ; i++) {
		offset = sprintf(sgx_dvfs_table_string+total, "%d\n", gdata[i].clock);
		total += offset;
	}
	sgx_dvfs_table = sgx_dvfs_table_string;

	if (device_create_file(&pdev->dev, &dev_attr_sgx_dvfs_table) < 0)
		PVR_LOG(("device_create_file: dev_attr_sgx_dvfs_table fail"));
}
/*
 * gpu_clock_enable - enable the SGX core and hydra clocks.
 *
 * Returns 0 on success or the clk_enable() error code. On failure of the
 * second (hydra) clock the first (core) clock is disabled again so no
 * enable reference is leaked.
 */
int gpu_clock_enable(void)
{
	int err;

	err = clk_enable(sgx_core);
	if (err) {
		PVR_LOG(("SGX sgx_core clock enable fail!"));
		return err;
	}

	err = clk_enable(sgx_hyd);
	if (err) {
		PVR_LOG(("SGX sgx_hyd clock enable fail!"));
		/* balance the successful sgx_core enable above */
		clk_disable(sgx_core);
		return err;
	}

#if defined(CONFIG_EXYNOS5410_BTS)
	sgx_clk_status = 1;
#endif
	return 0;
}
/*
 * gpu_clock_set_parent - route the G3D clock tree: mout_g3d from the VPLL,
 * and both core/hydra sub-muxes from their dividers. Stops at the first
 * failure and returns that error (0 on success).
 */
int gpu_clock_set_parent()
{
	int err = clk_set_parent(mout_g3d, vpll_clock);

	if (err) {
		PVR_LOG(("SGX mout_g3d clk_set_parent fail!"));
	} else if ((err = clk_set_parent(g3d_clock_core_sub, g3d_clock_core)) != 0) {
		PVR_LOG(("SGX g3d_clock_core_sub clk_set_parent fail!"));
	} else if ((err = clk_set_parent(g3d_clock_hydra_sub, g3d_clock_hydra)) != 0) {
		PVR_LOG(("SGX g3d_clock_hydra_sub clk_set_parent fail!"));
	}

	return err;
}
/* PVRSRVDumpDebugInfoKM */
/*
 * Validate the requested verbosity and trigger a driver-wide debug dump.
 * Returns PVRSRV_ERROR_INVALID_PARAMS for an out-of-range verbosity.
 */
PVRSRV_ERROR PVRSRVDumpDebugInfoKM(IMG_UINT32 ui32VerbLevel)
{
	if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
		return PVRSRV_ERROR_INVALID_PARAMS;

	PVR_LOG(("User requested PVR debug info"));

	PVRSRVDebugRequest(ui32VerbLevel, IMG_NULL);

	return PVRSRV_OK;
}
/*this function using for DVFS*/
/*
 * gpu_clock_set - retune the source VPLL and both G3D clocks to sgx_clk
 * (MHz), skipping any clock already at the requested rate, then cache the
 * rate actually achieved in sgx_gpu_clk.
 */
void gpu_clock_set(int sgx_clk)
{
	int old_clk = clk_get_rate(g3d_clock_core)/MHZ;

	if (clk_get_rate(fout_vpll_clock)/MHZ != sgx_clk)
		sgx_gpu_src_clk = clk_set_rate(fout_vpll_clock, sgx_clk * MHZ);

	if (clk_get_rate(g3d_clock_core)/MHZ != sgx_clk)
		clk_set_rate(g3d_clock_core, sgx_clk * MHZ);

	if (clk_get_rate(g3d_clock_hydra)/MHZ != sgx_clk)
		clk_set_rate(g3d_clock_hydra, sgx_clk * MHZ);

	/* Cache the rate the core clock actually ended up at */
	sgx_gpu_clk = clk_get_rate(g3d_clock_core)/MHZ;

#ifdef DEBUG_BW
	{
		unsigned int mif_sdiv;

		/* Low 3 bits of BPLL_CON0: the S (post) divider */
		mif_sdiv = __raw_readl(EXYNOS5_BPLL_CON0);
		mif_sdiv &= 0x7;

#if defined(CONFIG_EXYNOS5410_BTS)
		{
			unsigned int bts = 0;

			if (sgx_clk_status && __raw_readl(sgx_bts_base+0))
				bts = __raw_readl(sgx_bts_base+0xc);
			else
				bts = 0;

			/* 800/mif_sdiv: MIF rate assuming an 800 MHz BPLL base —
			 * TODO confirm for this SoC */
			PVR_LOG(("SGX change clock [%d] Mhz -> [%d] MHz req [%d] MHz / M[%d] / B[%d]", old_clk, sgx_gpu_clk, sgx_clk, (800/mif_sdiv), bts));
		}
#else
		PVR_LOG(("SGX change clock [%d] Mhz -> [%d] MHz req [%d] MHz / M[%d]", old_clk, sgx_gpu_clk, sgx_clk, (800/mif_sdiv)));
#endif
	}
#endif
}
/*
 * List callback: check whether one memory context's page catalogue matches
 * the faulting physical address. Returns IMG_FALSE to stop iterating once
 * the owning context has been found and dumped, IMG_TRUE to keep going.
 */
static IMG_BOOL _RGXCheckFaultAddress(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
{
	RGX_FAULT_DATA *psFaultData = (RGX_FAULT_DATA *) pvCallbackData;
	SERVER_MMU_CONTEXT *psCtx = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
	IMG_DEV_PHYADDR sPCDevPAddr;

	if (MMU_AcquireBaseAddr(psCtx->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
	{
		PVR_LOG(("Failed to get PC address for memory context"));
		return IMG_TRUE;	/* can't compare; keep searching */
	}

	if (psFaultData->psDevPAddr->uiAddr != sPCDevPAddr.uiAddr)
	{
		return IMG_TRUE;	/* not this context */
	}

	PVR_LOG(("Found memory context (PID = %d, %s)",
			 psCtx->uiPID,
			 psCtx->szProcessName));

	MMU_CheckFaultAddress(psCtx->psMMUContext, psFaultData->psDevVAddr);

	return IMG_FALSE;	/* found it; stop iteration */
}
/* gpu clock setting*/
/*
 * sec_gpu_vol_clk_change - move the GPU to a new clock (MHz) / voltage
 * operating point under 'lock', updating the PM QoS requests that track
 * the GPU state. If the GPU is powered off, only the target values are
 * remembered for when power returns.
 */
void sec_gpu_vol_clk_change(int sgx_clock, int sgx_voltage)
{
	int cur_sgx_clock;

	mutex_lock(&lock);
	cur_sgx_clock = gpu_clock_get();
	/* apply the global voltage margin on top of the table value */
	sgx_voltage += gpu_voltage_marin;

#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
	if (sec_gpu_power_on) {
		/* At top GPU clock, also pin a minimum CPU frequency */
		if (sgx_clock >= sec_gpu_top_clock) {
#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
			pm_qos_update_request(&exynos5_g3d_cpu_qos, 600000);
#else
			pm_qos_update_request(&exynos5_g3d_cpu_qos, 800000);
#endif
		}
		/* Scale the memory-interface QoS with the GPU clock */
		if (sgx_clock < MIF_THRESHHOLD_VALUE_CLK)
			pm_qos_update_request(&exynos5_g3d_mif_qos, 267000);
		else
			pm_qos_update_request(&exynos5_g3d_mif_qos, 800000);
	} else {
		/* GPU powered off: drop all QoS requests */
		pm_qos_update_request(&exynos5_g3d_cpu_qos, 0);
		pm_qos_update_request(&exynos5_g3d_int_qos, 0);
		pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
	}
#endif

	if (sec_gpu_power_on) {
		/* Ordering matters: when lowering, drop the clock before the
		 * voltage; when raising, raise the voltage first so the GPU is
		 * never clocked faster than its supply supports. */
		if (cur_sgx_clock > sgx_clock) {
			gpu_clock_set(sgx_clock);
			gpu_voltage_set(sgx_voltage);
		} else if (cur_sgx_clock < sgx_clock) {
			gpu_voltage_set(sgx_voltage);
			gpu_clock_set(sgx_clock);
		}
		/* Record what the hardware actually ended up at */
		sec_gpu_setting_clock = gpu_clock_get();
		sec_gpu_setting_voltage = gpu_voltage_get();
	} else {
		/* GPU off: remember the request only */
		sec_gpu_setting_clock = sgx_clock;
		sec_gpu_setting_voltage = sgx_voltage;
		PVR_LOG(("SGX keep DVFS info sgx_clock:%d MHz, sgx_voltage:%d mV ", sgx_clock, sgx_voltage));
	}
	mutex_unlock(&lock);
}
/*
 * Destroy a server-side compute context after the firmware has released
 * it. Returns PVRSRV_ERROR_RETRY while FW cleanup is pending and any
 * other cleanup error (after logging it); only on success are the
 * host-side resources freed and the context removed from the device list.
 */
IMG_EXPORT PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
	PVRSRV_ERROR eErr;

	/* Check if the FW has finished with this resource ... */
	eErr = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
			psComputeContext->psServerCommonContext,
			psComputeContext->psSync,
			RGXFWIF_DM_CDM);

	if (eErr != PVRSRV_OK)
	{
		/* RETRY is expected while the FW is still busy; don't log it */
		if (eErr != PVRSRV_ERROR_RETRY)
		{
			PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
					__FUNCTION__,
					PVRSRVGetErrorStringKM(eErr)));
		}
		return eErr;
	}

	/* ... it has so we can free its resources */
	OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
	dllist_remove_node(&(psComputeContext->sListNode));
	OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);

	FWCommonContextFree(psComputeContext->psServerCommonContext);
	DevmemFwFree(psComputeContext->psFWFrameworkMemDesc);
	DevmemFwFree(psComputeContext->psFWComputeContextStateMemDesc);
	SyncPrimFree(psComputeContext->psSync);
	SyncAddrListDeinit(&psComputeContext->sSyncAddrListFence);
	SyncAddrListDeinit(&psComputeContext->sSyncAddrListUpdate);
	OSFreeMem(psComputeContext);

	return PVRSRV_OK;
}
/*
 * List callback: locate the server MMU context whose page catalogue sits
 * at the physical address in the search data. Stores the match and stops
 * iteration (IMG_FALSE); returns IMG_TRUE to continue searching.
 */
static IMG_BOOL _RGXFindMMUContext(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
{
	RGX_FIND_MMU_CONTEXT *psData = pvCallbackData;
	SERVER_MMU_CONTEXT *psCtx = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
	IMG_DEV_PHYADDR sPCDevPAddr;

	if (MMU_AcquireBaseAddr(psCtx->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
	{
		PVR_LOG(("Failed to get PC address for memory context"));
		return IMG_TRUE;	/* can't compare; keep searching */
	}

	if (psData->sPCAddress.uiAddr != sPCDevPAddr.uiAddr)
	{
		return IMG_TRUE;	/* not this context */
	}

	psData->psServerMMUContext = psCtx;
	return IMG_FALSE;	/* found; stop iteration */
}
/*
 * SysDebugInfo - map the emulator wrapper register bank, log a fixed set
 * of diagnostic registers, and unmap again.
 *
 * Returns PVRSRV_OK, or PVRSRV_ERROR_BAD_MAPPING if the register mapping
 * could not be created.
 */
PVRSRV_ERROR SysDebugInfo(PVRSRV_SYSTEM_CONFIG *psSysConfig)
{
	PVRSRV_DEVICE_CONFIG *psDevice;
	IMG_CPU_PHYADDR sWrapperRegsCpuPBase;
	IMG_VOID *pvWrapperRegs;

	/* NOTE(review): the psSysConfig parameter is not used; the file-scope
	 * sSysConfig is read instead — confirm this is intentional. */
	psDevice = &sSysConfig.pasDevices[0];
	sWrapperRegsCpuPBase.uiAddr = psDevice->sRegsCpuPBase.uiAddr + EMULATOR_RGX_REG_WRAPPER_OFFSET;

	/* map emu registers */
	pvWrapperRegs = OSMapPhysToLin(sWrapperRegsCpuPBase,
								   EMULATOR_RGX_REG_WRAPPER_SIZE,
								   0);
	if (pvWrapperRegs == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysDebugDump: Failed to create wrapper register mapping\n"));
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	PVR_LOG(("------[ System Debug ]------"));

	/* Read-and-log helpers for 32-bit / 64-bit wrapper registers */
#define SYS_EMU_DBG_R32(R) PVR_LOG(("%-25s 0x%08X", #R ":", OSReadHWReg32(pvWrapperRegs, R)))
#define SYS_EMU_DBG_R64(R) PVR_LOG(("%-25s 0x%010llX", #R ":", OSReadHWReg64(pvWrapperRegs, R)))

	SYS_EMU_DBG_R32(EMU_CR_PCI_MASTER);
	SYS_EMU_DBG_R64(EMU_CR_WRAPPER_ERROR);
	SYS_EMU_DBG_R32(EMU_CR_BANK_OUTSTANDING0);
	SYS_EMU_DBG_R32(EMU_CR_BANK_OUTSTANDING1);
	SYS_EMU_DBG_R32(EMU_CR_BANK_OUTSTANDING2);
	SYS_EMU_DBG_R32(EMU_CR_BANK_OUTSTANDING3);
	SYS_EMU_DBG_R32(EMU_CR_MEMORY_LATENCY);

	/* remove mapping */
	OSUnMapPhysToLin(pvWrapperRegs, EMULATOR_RGX_REG_WRAPPER_SIZE, 0);

	return PVRSRV_OK;
}
int sec_clock_change_up(int level, int step) { level -= step; if (level < 0) level = 0; if (sgx_dvfs_max_lock) { if (level < custom_max_lock_level) level = custom_max_lock_level; } sgx_dvfs_down_requirement = gdata[level].stay_total_count; sec_gpu_vol_clk_change(gdata[level].clock, gdata[level].voltage); if ((g_debug_CCB_Info_Flag % g_debug_CCB_count) == 0) PVR_LOG(("SGX CCB RO : %d, WO : %d, Total : %d", *g_debug_CCB_Info_RO, *g_debug_CCB_Info_WO, g_debug_CCB_Info_WCNT)); g_debug_CCB_Info_WCNT = 0; g_debug_CCB_Info_Flag ++; return level; }
/* sysfs "show" handler: report the current DVFS max-lock value (MHz). */
static ssize_t get_max_clock(struct device *d, struct device_attribute *a, char *buf)
{
	int lock_mhz = sgx_dvfs_max_lock;

	PVR_LOG(("get_max_clock: %d MHz", lock_mhz));

	return sprintf(buf, "%d\n", lock_mhz);
}
/*!
******************************************************************************

 @Function	PVRSRVGetMiscInfoKM

 @Description	Retrieves misc. info. Each flag set in ui32StateRequest is
		serviced independently; the flags actually satisfied are
		accumulated in ui32StatePresent.

 @Output	PVRSRV_MISC_INFO

 @Return	PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
{
	SYS_DATA *psSysData;

	if(!psMiscInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psMiscInfo->ui32StatePresent = 0;

	/* do a basic check for uninitialised request flag */
	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
		|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
		|PVRSRV_MISC_INFO_MEMSTATS_PRESENT
		|PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
		|PVRSRV_MISC_INFO_DDKVERSION_PRESENT
		|PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
		|PVRSRV_MISC_INFO_RESET_PRESENT
		|PVRSRV_MISC_INFO_FREEMEM_PRESENT
		|PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT
		|PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT
		|PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT))
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	/* return SOC Timer registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
	   (psSysData->pvSOCTimerRegisterKM != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
	}
	else
	{
		/* not requested or not available: return NULLs */
		psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
	}

	/* return SOC Clock Gating registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
	   (psSysData->pvSOCClockGateRegsBase != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
	}

	/* memory stats: format a textual report into the caller's buffer */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
	   (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		RA_ARENA **ppArena;
/*		BM_HEAP *psBMHeap;
		BM_CONTEXT *psBMContext;
		PVRSRV_DEVICE_NODE *psDeviceNode;*/
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_INT32 i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;

		/* Local backing stores */
		ppArena = &psSysData->apsLocalDevMemArena[0];
		while(*ppArena)
		{
			CHECK_SPACE(ui32StrLen);
			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			RA_GetStats(*ppArena, &pszStr, &ui32StrLen);
			/* advance through the array */
			ppArena++;
		}

		/* per device */
/*		psDeviceNode = psSysData->psDeviceNodeList;*/

		/*triple loop; devices:contexts:heaps*/
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
							&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
							&ui32StrLen,
							&i32Count,
							&pszStr,
							PVRSRV_MISC_INFO_MEMSTATS_PRESENT);

		/* attach a new line and string terminate */
		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Lean version of mem stats: only show free mem on each RA */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0) &&
	   psMiscInfo->pszMemoryStr)
	{
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_INT32 i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;

		/* triple loop over devices:contexts:heaps */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
							&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
							&ui32StrLen,
							&i32Count,
							&pszStr,
							PVRSRV_MISC_INFO_FREEMEM_PRESENT);

		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* copy out the global event object, if present */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
	   (psSysData->psGlobalEventObject != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
		psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
	}

	/* DDK version and memstats not supported in same call to GetMiscInfo
	 * (both would write into the same pszMemoryStr buffer) */
	if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
		&& ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
		&& (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_UINT32 ui32LenStrPerNum = 12; /* string length per UI32: 10 digits + '.' + '\0' = 12 bytes */
		IMG_INT32 i32Count;
		IMG_INT i;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;

		/* construct DDK string */
		psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
		psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
		psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BUILD_HI;
		psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD_LO;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		/* emit "MAJ.MIN.HI.LO" into the caller's buffer */
		for (i=0; i<4; i++)
		{
			if (ui32StrLen < ui32LenStrPerNum)
			{
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			if (i != 3)
			{
				i32Count = OSSNPrintf(pszStr, 2, ".");
				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			}
		}
	}

	/* CPU cache maintenance: either record a deferred op or perform an
	 * immediate flush/clean over the supplied meminfo range */
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;

		if(psMiscInfo->sCacheOpCtl.bDeferOp)
		{
			/* For now, assume deferred ops are "full" cache ops,
			 * and we don't need (or expect) a meminfo.
			 */
			psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
		}
		else
		{
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
			PVRSRV_PER_PROCESS_DATA *psPerProc;

			if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Ignoring non-deferred cache op with no meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Deferred cache op is pending. It is unlikely you want "
						 "to combine deferred cache ops with immediate ones"));
			}

			psPerProc = PVRSRVFindPerProcessData();

			/* translate the user handle into the kernel meminfo */
			if(PVRSRVLookupHandle(psPerProc->psHandleBase,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psMiscInfo->sCacheOpCtl.u.psKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
						 "Can't find kernel meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
			{
				if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   0,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
			else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
			{
				/* zero-length clean is a silent no-op */
				if(psMiscInfo->sCacheOpCtl.ui32Length!=0)
				{
					if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
											   0,
											   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
											   psMiscInfo->sCacheOpCtl.ui32Length))
					{
						return PVRSRV_ERROR_CACHEOP_FAILED;
					}
				}
			}
		}
	}

	/* report the refcount of a meminfo identified by user handle */
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT) != 0UL)
	{
		PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
		PVRSRV_PER_PROCESS_DATA *psPerProc;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT;

		psPerProc = PVRSRVFindPerProcessData();

		if(PVRSRVLookupHandle(psPerProc->psHandleBase,
							  (IMG_PVOID *)&psKernelMemInfo,
							  psMiscInfo->sGetRefCountCtl.u.psKernelMemInfo,
							  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
					 "Can't find kernel meminfo"));
			return PVRSRV_ERROR_INVALID_PARAMS;
		}

		psMiscInfo->sGetRefCountCtl.ui32RefCount = psKernelMemInfo->ui32RefCount;
	}

	if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT) != 0UL)
	{
		psMiscInfo->ui32PageSize = HOST_PAGESIZE();
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT;
	}

#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
	{
		PVR_LOG(("User requested OS reset"));
		OSPanic();
	}
#endif /* #if defined(PVRSRV_RESET_ON_HWTIMEOUT) */

#if defined(SUPPORT_PVRSRV_DEVICE_CLASS)
	if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT) != 0UL)
	{
		PVRSRVProcessQueues(IMG_TRUE);
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT;
	}
#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */

	return PVRSRV_OK;
}
void gpu_voltage_set(int sgx_vol) { PVR_LOG(("SGX change voltage [%d] -> [%d] mV", sgx_gpu_vol, sgx_vol)); regulator_set_voltage(g3d_pd_regulator, sgx_vol, sgx_vol); sgx_gpu_vol = regulator_get_voltage(g3d_pd_regulator); }
/*
 * EmuReset - soft-reset the RGX emulator design via its wrapper registers.
 *
 * @sRegsCpuPBase: CPU physical base of the emulator register bank; the
 *                 wrapper registers live at a fixed offset from it.
 *
 * Returns PVRSRV_OK, or PVRSRV_ERROR_BAD_MAPPING if the temporary
 * register mapping could not be created.
 *
 * The register write order below is deliberate: latency must be set
 * before reset so it applies to the whole design, and each write is
 * followed by a dummy read to flush it to the hardware before waiting.
 */
static PVRSRV_ERROR EmuReset(IMG_CPU_PHYADDR sRegsCpuPBase)
{
	IMG_CPU_PHYADDR sWrapperRegsCpuPBase;
	IMG_VOID *pvWrapperRegs;
	IMG_UINT32 ui32MemLatency;

	sWrapperRegsCpuPBase.uiAddr = sRegsCpuPBase.uiAddr + EMULATOR_RGX_REG_WRAPPER_OFFSET;

	/*
		Create a temporary mapping of the wrapper registers in order to reset
		the emulator design.
	*/
	pvWrapperRegs = OSMapPhysToLin(sWrapperRegsCpuPBase,
								   EMULATOR_RGX_REG_WRAPPER_SIZE,
								   0);
	if (pvWrapperRegs == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"EmuReset: Failed to create wrapper register mapping\n"));
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	/*
		Set the memory latency. This needs to be done before the soft reset
		to ensure it applies to all aspects of the emulator.
	*/
	ui32MemLatency = EmuMemLatencyGet();
	if (ui32MemLatency != 0)
	{
		PVR_LOG(("EmuReset: Mem latency = 0x%X", ui32MemLatency));
	}
	OSWriteHWReg32(pvWrapperRegs, EMU_CR_MEMORY_LATENCY, ui32MemLatency);
	(void) OSReadHWReg32(pvWrapperRegs, EMU_CR_MEMORY_LATENCY);

	/* Emu reset: assert all three reset lines, flush, settle, deassert. */
	OSWriteHWReg32(pvWrapperRegs, EMU_CR_SOFT_RESET, EMU_CR_SOFT_RESET_SYS_EN|EMU_CR_SOFT_RESET_MEM_EN|EMU_CR_SOFT_RESET_CORE_EN);
	/* Flush register write */
	(void) OSReadHWReg32(pvWrapperRegs, EMU_CR_SOFT_RESET);
	OSWaitus(10);

	OSWriteHWReg32(pvWrapperRegs, EMU_CR_SOFT_RESET, 0x0);
	/* Flush register write */
	(void) OSReadHWReg32(pvWrapperRegs, EMU_CR_SOFT_RESET);
	OSWaitus(10);

#if !defined(LMA)
	/* If we're UMA then enable bus mastering */
	OSWriteHWReg32(pvWrapperRegs, EMU_CR_PCI_MASTER, EMU_CR_PCI_MASTER_MODE_EN);
#else
	/* otherwise disable it: the emu regbank is not resetable */
	OSWriteHWReg32(pvWrapperRegs, EMU_CR_PCI_MASTER, 0x0);
#endif
	/* Flush register write */
	(void) OSReadHWReg32(pvWrapperRegs, EMU_CR_PCI_MASTER);

	/*
		Remove the temporary register mapping.
	*/
	OSUnMapPhysToLin(pvWrapperRegs, EMULATOR_RGX_REG_WRAPPER_SIZE, 0);

	return PVRSRV_OK;
}
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo) #endif { SYS_DATA *psSysData; if(!psMiscInfo) { PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters")); return PVRSRV_ERROR_INVALID_PARAMS; } psMiscInfo->ui32StatePresent = 0; if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT |PVRSRV_MISC_INFO_CLOCKGATE_PRESENT |PVRSRV_MISC_INFO_MEMSTATS_PRESENT |PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT |PVRSRV_MISC_INFO_DDKVERSION_PRESENT |PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT |PVRSRV_MISC_INFO_RESET_PRESENT |PVRSRV_MISC_INFO_FREEMEM_PRESENT)) { PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags")); return PVRSRV_ERROR_INVALID_PARAMS; } SysAcquireData(&psSysData); if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) && (psSysData->pvSOCTimerRegisterKM != IMG_NULL)) { psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT; psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM; psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle; } else { psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL; psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL; } if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) && (psSysData->pvSOCClockGateRegsBase != IMG_NULL)) { psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT; psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase; psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize; } if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) && (psMiscInfo->pszMemoryStr != IMG_NULL)) { RA_ARENA **ppArena; IMG_CHAR *pszStr; IMG_UINT32 ui32StrLen; IMG_INT32 i32Count; pszStr = psMiscInfo->pszMemoryStr; ui32StrLen = psMiscInfo->ui32MemoryStrLen; psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT; ppArena = &psSysData->apsLocalDevMemArena[0]; while(*ppArena) { CHECK_SPACE(ui32StrLen); i32Count = 
OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n"); UPDATE_SPACE(pszStr, i32Count, ui32StrLen); RA_GetStats(*ppArena, &pszStr, &ui32StrLen); ppArena++; } List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList, &PVRSRVGetMiscInfoKM_Device_AnyVaCb, &ui32StrLen, &i32Count, &pszStr, PVRSRV_MISC_INFO_MEMSTATS_PRESENT); i32Count = OSSNPrintf(pszStr, 100, "\n"); UPDATE_SPACE(pszStr, i32Count, ui32StrLen); } if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0) && psMiscInfo->pszMemoryStr) { IMG_CHAR *pszStr; IMG_UINT32 ui32StrLen; IMG_INT32 i32Count; pszStr = psMiscInfo->pszMemoryStr; ui32StrLen = psMiscInfo->ui32MemoryStrLen; psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT; List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList, &PVRSRVGetMiscInfoKM_Device_AnyVaCb, &ui32StrLen, &i32Count, &pszStr, PVRSRV_MISC_INFO_FREEMEM_PRESENT); i32Count = OSSNPrintf(pszStr, 100, "\n"); UPDATE_SPACE(pszStr, i32Count, ui32StrLen); } if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) && (psSysData->psGlobalEventObject != IMG_NULL)) { psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT; psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject; } if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL) && ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL) && (psMiscInfo->pszMemoryStr != IMG_NULL)) { IMG_CHAR *pszStr; IMG_UINT32 ui32StrLen; IMG_UINT32 ui32LenStrPerNum = 12; IMG_INT32 i32Count; IMG_INT i; psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT; psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ; psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN; psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH; psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD; pszStr = psMiscInfo->pszMemoryStr; ui32StrLen = psMiscInfo->ui32MemoryStrLen; for (i=0; i<4; i++) { if (ui32StrLen < ui32LenStrPerNum) 
{ return PVRSRV_ERROR_INVALID_PARAMS; } i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]); UPDATE_SPACE(pszStr, i32Count, ui32StrLen); if (i != 3) { i32Count = OSSNPrintf(pszStr, 2, "."); UPDATE_SPACE(pszStr, i32Count, ui32StrLen); } } } if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL) { psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT; if(psMiscInfo->sCacheOpCtl.bDeferOp) { psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType; } else { #if defined (SUPPORT_SID_INTERFACE) PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = psMiscInfo->sCacheOpCtl.psKernelMemInfo; if(!psMiscInfo->sCacheOpCtl.psKernelMemInfo) #else PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo; PVRSRV_PER_PROCESS_DATA *psPerProc; if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo) #endif { PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: " "Ignoring non-deferred cache op with no meminfo")); return PVRSRV_ERROR_INVALID_PARAMS; } if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE) { PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: " "Deferred cache op is pending. 
It is unlikely you want " "to combine deferred cache ops with immediate ones")); } #if defined (SUPPORT_SID_INTERFACE) PVR_DBG_BREAK #else psPerProc = PVRSRVFindPerProcessData(); if(PVRSRVLookupHandle(psPerProc->psHandleBase, (IMG_PVOID *)&psKernelMemInfo, psMiscInfo->sCacheOpCtl.u.psKernelMemInfo, PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: " "Can't find kernel meminfo")); return PVRSRV_ERROR_INVALID_PARAMS; } #endif if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH) { if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle, psMiscInfo->sCacheOpCtl.pvBaseVAddr, psMiscInfo->sCacheOpCtl.ui32Length)) { return PVRSRV_ERROR_CACHEOP_FAILED; } } else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN) { if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle, psMiscInfo->sCacheOpCtl.pvBaseVAddr, psMiscInfo->sCacheOpCtl.ui32Length)) { return PVRSRV_ERROR_CACHEOP_FAILED; } } /* FIXME: Temporary fix needs to be revisited * LinuxMemArea struct listing is not registered for memory areas * wrapped through PVR2DMemWrap() call. For now, we are doing * cache flush/inv by grabbing the physical pages through * get_user_pages() for every blt call. 
*/ else if (psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CUSTOM_FLUSH) { #if defined(CONFIG_OUTER_CACHE) && defined(PVR_NO_FULL_CACHE_OPS) if (1) { IMG_SIZE_T uPageOffset, uPageCount; IMG_VOID *pvPageAlignedCPUVAddr; IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL; IMG_HANDLE hOSWrapMem = IMG_NULL; PVRSRV_ERROR eError; int i; uPageOffset = (IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr & (HOST_PAGESIZE() - 1); uPageCount = HOST_PAGEALIGN(psMiscInfo->sCacheOpCtl.ui32Length + uPageOffset)/HOST_PAGESIZE(); pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr - uPageOffset); if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), (IMG_VOID **)&psIntSysPAddr, IMG_NULL, "Array of Page Addresses") != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); return PVRSRV_ERROR_OUT_OF_MEMORY; } eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr, uPageCount * HOST_PAGESIZE(), psIntSysPAddr, &hOSWrapMem); for (i = 0; i < uPageCount; i++) { outer_flush_range(psIntSysPAddr[i].uiAddr, psIntSysPAddr[i].uiAddr + HOST_PAGESIZE() -1); } OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL); OSReleasePhysPageAddr(hOSWrapMem); } #else OSFlushCPUCacheKM(); #endif /* CONFIG_OUTER_CACHE && PVR_NO_FULL_CACHE_OPS*/ } else if (psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CUSTOM_INV) { #if defined(CONFIG_OUTER_CACHE) /* TODO: Need to check full cache invalidation, but * currently it is not exported through * outer_cache interface. 
*/ if (1) { IMG_SIZE_T uPageOffset, uPageCount; IMG_VOID *pvPageAlignedCPUVAddr; IMG_SYS_PHYADDR *psIntSysPAddr = IMG_NULL; IMG_HANDLE hOSWrapMem = IMG_NULL; PVRSRV_ERROR eError; int i; uPageOffset = (IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr & (HOST_PAGESIZE() - 1); uPageCount = HOST_PAGEALIGN(psMiscInfo->sCacheOpCtl.ui32Length + uPageOffset)/HOST_PAGESIZE(); pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr - uPageOffset); if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), (IMG_VOID **)&psIntSysPAddr, IMG_NULL, "Array of Page Addresses") != PVRSRV_OK) { PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block")); return PVRSRV_ERROR_OUT_OF_MEMORY; } eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr, uPageCount * HOST_PAGESIZE(), psIntSysPAddr, &hOSWrapMem); for (i = 0; i < uPageCount; i++) { outer_inv_range(psIntSysPAddr[i].uiAddr, psIntSysPAddr[i].uiAddr + HOST_PAGESIZE() -1); } OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, uPageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL); OSReleasePhysPageAddr(hOSWrapMem); } #endif /* CONFIG_OUTER_CACHE */ } } } #if defined(PVRSRV_RESET_ON_HWTIMEOUT) if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL) { PVR_LOG(("User requested OS reset")); OSPanic(); } #endif return PVRSRV_OK; }
/*
 * PVRSRVGetMiscInfoKM - service a batch of miscellaneous driver info
 * requests selected by psMiscInfo->ui32StateRequest; each satisfied item
 * sets its bit in ui32StatePresent. This variant additionally supports
 * PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT and reports the DDK build as
 * PVRVERSION_BUILD_HI/PVRVERSION_BUILD_LO.
 */
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
#endif
{
	SYS_DATA *psSysData;

	if(!psMiscInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psMiscInfo->ui32StatePresent = 0;

	/* Reject any request bits this build does not understand. */
	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
		|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
		|PVRSRV_MISC_INFO_MEMSTATS_PRESENT
		|PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
		|PVRSRV_MISC_INFO_DDKVERSION_PRESENT
		|PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
		|PVRSRV_MISC_INFO_RESET_PRESENT
		|PVRSRV_MISC_INFO_FREEMEM_PRESENT
		|PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT))
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	/* SOC timer registers: hand out the mapping, or NULL both fields. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
		(psSysData->pvSOCTimerRegisterKM != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
	}
	else
	{
		psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
	}

	/* SOC clock-gating register block. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
		(psSysData->pvSOCClockGateRegsBase != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
	}

	/* Memory statistics rendered as text into the caller's buffer. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
		(psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		RA_ARENA **ppArena;
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_INT32 i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;

		/* Local backing-store arenas first... */
		ppArena = &psSysData->apsLocalDevMemArena[0];
		while(*ppArena)
		{
			CHECK_SPACE(ui32StrLen);
			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			RA_GetStats(*ppArena, &pszStr, &ui32StrLen);
			ppArena++;
		}

		/* ...then per-device stats via the device-node list walker. */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_MEMSTATS_PRESENT);

		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Free-memory summary, same buffer mechanism as MEMSTATS. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)
		&& psMiscInfo->pszMemoryStr)
	{
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_INT32 i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;

		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_FREEMEM_PRESENT);

		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Global event object (copied by value into the caller's struct). */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
		(psSysData->psGlobalEventObject != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
		psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
	}

	/* DDK version, numeric and as a dotted string. Mutually exclusive with
	 * MEMSTATS because both write into pszMemoryStr. */
	if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
		&& ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
		&& (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_UINT32 ui32LenStrPerNum = 12; /* worst-case digits for one component */
		IMG_INT32 i32Count;
		IMG_INT i;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;

		psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
		psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
		psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BUILD_HI;
		psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD_LO;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		for (i=0; i<4; i++)
		{
			if (ui32StrLen < ui32LenStrPerNum)
			{
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			if (i != 3)
			{
				i32Count = OSSNPrintf(pszStr, 2, ".");
				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			}
		}
	}

	/* CPU cache maintenance: deferred ops just record the type; immediate
	 * ops resolve the meminfo handle and act now. */
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;

		if(psMiscInfo->sCacheOpCtl.bDeferOp)
		{
			psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
		}
		else
		{
#if defined (SUPPORT_SID_INTERFACE)
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = psMiscInfo->sCacheOpCtl.psKernelMemInfo;

			if(!psMiscInfo->sCacheOpCtl.psKernelMemInfo)
#else
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
			PVRSRV_PER_PROCESS_DATA *psPerProc;

			if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo)
#endif
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Ignoring non-deferred cache op with no meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Deferred cache op is pending. It is unlikely you want "
						 "to combine deferred cache ops with immediate ones"));
			}

#if defined (SUPPORT_SID_INTERFACE)
			PVR_DBG_BREAK
#else
			psPerProc = PVRSRVFindPerProcessData();

			if(PVRSRVLookupHandle(psPerProc->psHandleBase,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psMiscInfo->sCacheOpCtl.u.psKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
						 "Can't find kernel meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}
#endif

			if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
			{
				if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
			else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
			{
				if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
					return PVRSRV_ERROR_CACHEOP_FAILED;
			}
		}
	}

	/* Report the kernel-side reference count of a meminfo handle. */
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT) != 0UL)
	{
#if !defined (SUPPORT_SID_INTERFACE)
		PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
		PVRSRV_PER_PROCESS_DATA *psPerProc;
#endif

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT;

#if defined (SUPPORT_SID_INTERFACE)
		PVR_DBG_BREAK
#else
		psPerProc = PVRSRVFindPerProcessData();

		if(PVRSRVLookupHandle(psPerProc->psHandleBase,
							  (IMG_PVOID *)&psKernelMemInfo,
							  psMiscInfo->sGetRefCountCtl.u.psKernelMemInfo,
							  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
					 "Can't find kernel meminfo"));
			return PVRSRV_ERROR_INVALID_PARAMS;
		}

		psMiscInfo->sGetRefCountCtl.ui32RefCount = psKernelMemInfo->ui32RefCount;
#endif
	}

#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
	{
		PVR_LOG(("User requested OS reset"));
		OSPanic();
	}
#endif

	return PVRSRV_OK;
}
/*
 * DumpStalledCCBCommand - diagnostic dump for a client CCB that appears
 * stalled on a fence.
 *
 * If the sampled read offset equals the dependency offset but has not
 * caught up with the host write offset, the command at the read offset is
 * inspected; when it is a FENCE/FENCE_PR its UFO list is logged, and the
 * following command (and a possible subsequent UPDATE) are logged so the
 * blocked work and what it would unblock are both visible.
 *
 * NOTE(review): the inner `pui8Ptr` declaration shadows the outer one
 * assigned just above; the outer variable's value is never read after the
 * shadow begins. Harmless, but worth cleaning up.
 */
IMG_VOID DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, RGX_CLIENT_CCB *psCurrentClientCCB)
{
	volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
	IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
	volatile IMG_UINT8 *pui8Ptr;
	/* Snapshot the offsets once; the firmware may be updating them. */
	IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
	IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
	IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;

	pui8Ptr = pui8ClientCCBBuff + ui32SampledRdOff;

	if ((ui32SampledRdOff == ui32SampledDepOff) &&
		(ui32SampledRdOff != ui32SampledWrOff))
	{
		volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
		RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType;
		volatile IMG_UINT8 *pui8Ptr = (IMG_UINT8 *)psCommandHeader;

		/* CCB is stalled on a fence... */
		if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
		{
			RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
			IMG_UINT32 jj;

			/* Display details of the fence object on which the context is pending */
			PVR_LOG(("FWCtx 0x%08X (%s) pending on %s:",
					 sFWCommonContext.ui32Addr,
					 (IMG_PCHAR)&psCurrentClientCCB->szName,
					 _CCBCmdTypename(eCommandType)));
			for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
			{
				PVR_LOG((" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value));
			}

			/* Advance psCommandHeader past the FENCE to the next command header
			   (this will be the TA/3D command that is fenced) */
			pui8Ptr = (IMG_UINT8 *)psUFOPtr + psCommandHeader->ui32CmdSize;
			psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
			if( (IMG_UINTPTR_T)psCommandHeader != ((IMG_UINTPTR_T)pui8ClientCCBBuff + ui32SampledWrOff))
			{
				PVR_LOG((" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)));
				/* Advance psCommandHeader past the TA/3D to the next command header
				   (this will possibly be an UPDATE) */
				pui8Ptr += sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize;
				psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
				/* If the next command is an update, display details of that so we can
				   see what would then become unblocked */
				if( (IMG_UINTPTR_T)psCommandHeader != ((IMG_UINTPTR_T)pui8ClientCCBBuff + ui32SampledWrOff))
				{
					eCommandType = psCommandHeader->eCmdType;
					if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
					{
						psUFOPtr = (RGXFWIF_UFO *)((IMG_UINT8 *)psCommandHeader + sizeof(*psCommandHeader));
						PVR_LOG((" preventing %s:",_CCBCmdTypename(eCommandType)));
						for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
						{
							PVR_LOG((" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value));
						}
					}
				}
				else
				{
					PVR_LOG((" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr));
				}
			}
			else
			{
				PVR_LOG((" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr));
			}
		}
	}
}
/*
 * PVRSRVGetMiscInfoKM - service a batch of miscellaneous driver info
 * requests selected by psMiscInfo->ui32StateRequest; each satisfied item
 * sets its bit in ui32StatePresent. This exported variant reports the
 * DDK version of the *downloaded firmware* rather than the build-time
 * version macros (see comment in the DDKVERSION branch).
 */
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
{
	SYS_DATA *psSysData;

	if(!psMiscInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psMiscInfo->ui32StatePresent = 0;

	/* Reject any request bits this build does not understand. */
	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
		|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
		|PVRSRV_MISC_INFO_MEMSTATS_PRESENT
		|PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
		|PVRSRV_MISC_INFO_DDKVERSION_PRESENT
		|PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
		|PVRSRV_MISC_INFO_RESET_PRESENT
		|PVRSRV_MISC_INFO_FREEMEM_PRESENT))
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	/* SOC timer registers: hand out the mapping, or NULL both fields. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
		(psSysData->pvSOCTimerRegisterKM != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
	}
	else
	{
		psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
	}

	/* SOC clock-gating register block. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
		(psSysData->pvSOCClockGateRegsBase != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
	}

	/* Memory statistics rendered as text into the caller's buffer. */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
		(psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		RA_ARENA **ppArena;
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_INT32 i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;

		/* Local backing-store arenas first... */
		ppArena = &psSysData->apsLocalDevMemArena[0];
		while(*ppArena)
		{
			CHECK_SPACE(ui32StrLen);
			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			RA_GetStats(*ppArena, &pszStr, &ui32StrLen);
			ppArena++;
		}

		/* ...then per-device stats via the device-node list walker. */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_MEMSTATS_PRESENT);

		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Free-memory summary, same buffer mechanism as MEMSTATS. */
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT)
		&& psMiscInfo->pszMemoryStr)
	{
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_INT32 i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;

		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_FREEMEM_PRESENT);

		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Global event object (copied by value into the caller's struct). */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
		(psSysData->psGlobalEventObject != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
		psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
	}

	/* DDK version, numeric and as a dotted string. Mutually exclusive with
	 * MEMSTATS because both write into pszMemoryStr. */
	if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
		&& ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
		&& (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		IMG_CHAR *pszStr;
		IMG_UINT32 ui32StrLen;
		IMG_UINT32 ui32LenStrPerNum = 12; /* worst-case digits for one component */
		IMG_INT32 i32Count;
		PVRSRV_SGXDEV_INFO *sgx_dev_info;
		PVRSRV_SGX_MISCINFO_INFO *sgx_misc_info;
		PVRSRV_SGX_MISCINFO_FEATURES *sgx_features;
		unsigned long fw_ver;
		IMG_INT i;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;

		/*
		 * Since the kernel driver has already made sure that the
		 * firmware version is supported by the kernel driver in
		 * SGXDevInitCompatCheck, it's redundant for the user space
		 * part to perform the same check. In order to support older
		 * user space libraries where this check hasn't yet been removed,
		 * simply report the version of the downloaded firmware which
		 * will result in an exact match in user space.
		 */
		sgx_dev_info = pvr_get_sgx_dev_info();
		if (!sgx_dev_info || !sgx_dev_info->psKernelSGXMiscMemInfo ||
		    !sgx_dev_info->psKernelSGXMiscMemInfo->pvLinAddrKM)
			return PVRSRV_ERROR_INVALID_DEVICE;
		sgx_misc_info = sgx_dev_info->psKernelSGXMiscMemInfo->pvLinAddrKM;
		sgx_features = &sgx_misc_info->sSGXFeatures;
		fw_ver = sgx_features->ui32DDKVersion;

		/* Unpack major/minor/branch from the packed firmware version word. */
		psMiscInfo->aui32DDKVersion[0] = PVR_FW_VER_MAJOR(fw_ver);
		psMiscInfo->aui32DDKVersion[1] = PVR_FW_VER_MINOR(fw_ver);
		psMiscInfo->aui32DDKVersion[2] = PVR_FW_VER_BRANCH(fw_ver);
		psMiscInfo->aui32DDKVersion[3] = sgx_features->ui32DDKBuild;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		for (i=0; i<4; i++)
		{
			if (ui32StrLen < ui32LenStrPerNum)
			{
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			if (i != 3)
			{
				i32Count = OSSNPrintf(pszStr, 2, ".");
				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			}
		}
	}

	/* CPU cache maintenance: deferred ops just record the type; immediate
	 * ops resolve the meminfo handle and act now. */
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;

		if(psMiscInfo->sCacheOpCtl.bDeferOp)
		{
			psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
		}
		else
		{
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
			PVRSRV_PER_PROCESS_DATA *psPerProc;

			if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Ignoring non-deferred cache op with no meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Deferred cache op is pending. It is unlikely you want "
						 "to combine deferred cache ops with immediate ones"));
			}

			psPerProc = PVRSRVFindPerProcessData();

			if(PVRSRVLookupHandle(psPerProc->psHandleBase,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psMiscInfo->sCacheOpCtl.u.psKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
						 "Can't find kernel meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
			{
				if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
			else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
			{
				if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
		}
	}

#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
	{
		PVR_LOG(("User requested OS reset"));
		OSPanic();
	}
#endif

	return PVRSRV_OK;
}
/*
 * sec_gpu_dvfs_handler - GPU DVFS governor entry point, driven by the
 * current utilization sample.
 *
 * @utilization_value: GPU load for the last period; 0 means the GPU is
 *                     going idle and no decision is taken.
 *
 * Two modes:
 *  - custom lock (sgx_dvfs_custom_clock != 0): pin the GPU to the
 *    requested clock if it is present in the DVFS table;
 *  - auto: compare utilization against the current level's min/max
 *    thresholds (plus optional quick up/down "boost" thresholds) and
 *    step the level up or down accordingly.
 *
 * Side effects: updates sgx_dvfs_level, sgx_dvfs_down_requirement and
 * g_g3dfreq, and may change clock/voltage via sec_gpu_vol_clk_change().
 *
 * NOTE(review): in auto mode, when the bare `else` after the #endif is
 * taken (utilization within thresholds), the pending stay-count is
 * reloaded; the `else` binding across the #if blocks is deliberate but
 * fragile - do not re-brace without checking both config combinations.
 */
void sec_gpu_dvfs_handler(int utilization_value)
{
	/* Apply (or restore) user-supplied threshold table first. */
	if (custom_threshold_change)
		sec_custom_threshold_set();

	/*utilization_value is zero mean is gpu going to idle*/
	if (utilization_value == 0)
		return;

#ifdef CONFIG_ASV_MARGIN_TEST
	sgx_dvfs_custom_clock = set_g3d_freq;
#endif
	/* this check for custom dvfs setting - 0:auto, others: custom lock clock*/
	if (sgx_dvfs_custom_clock) {
		if (sgx_dvfs_custom_clock != gpu_clock_get()) {
			sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(sgx_dvfs_custom_clock);
			/* this check for current clock must be find in dvfs table */
			if (sgx_dvfs_level < 0) {
				PVR_LOG(("WARN: custom clock: %d MHz not found in DVFS table", sgx_dvfs_custom_clock));
				return;
			}

			if (sgx_dvfs_level < MAX_DVFS_LEVEL && sgx_dvfs_level >= 0) {
				sec_gpu_vol_clk_change(g_gpu_dvfs_data[sgx_dvfs_level].clock,
						       g_gpu_dvfs_data[sgx_dvfs_level].voltage);
				PVR_LOG(("INFO: CUSTOM DVFS [%d MHz] (%d, %d), utilization [%d] -(%d MHz)",
					 gpu_clock_get(),
					 g_gpu_dvfs_data[sgx_dvfs_level].min_threadhold,
					 g_gpu_dvfs_data[sgx_dvfs_level].max_threadhold,
					 utilization_value, sgx_dvfs_custom_clock ));
			} else {
				/* Out-of-range level: fall back to auto mode. */
				PVR_LOG(("INFO: CUSTOM DVFS [%d MHz] invalid clock - restore auto mode", sgx_dvfs_custom_clock));
				sgx_dvfs_custom_clock = 0;
			}
		}
	} else {
		sgx_dvfs_level = sec_gpu_dvfs_level_from_clk_get(gpu_clock_get());
		/* this check for current clock must be find in dvfs table */
		if (sgx_dvfs_level < 0) {
			PVR_LOG(("WARN: current clock: %d MHz not found in DVFS table. so set to max clock", gpu_clock_get()));
			sec_gpu_vol_clk_change(g_gpu_dvfs_data[BASE_START_LEVEL].clock,
					       g_gpu_dvfs_data[BASE_START_LEVEL].voltage);
			return;
		}

		PVR_DPF((PVR_DBG_MESSAGE, "INFO: AUTO DVFS [%d MHz] <%d, %d>, utilization [%d]",
			 gpu_clock_get(),
			 g_gpu_dvfs_data[sgx_dvfs_level].min_threadhold,
			 g_gpu_dvfs_data[sgx_dvfs_level].max_threadhold,
			 utilization_value));

		/* check current level's threadhold value */
		if (g_gpu_dvfs_data[sgx_dvfs_level].min_threadhold > utilization_value) {
#if defined(USING_BOOST_DOWN_MODE)
			/* check need Quick up/down change */
			if (g_gpu_dvfs_data[sgx_dvfs_level].quick_down_threadhold >= utilization_value)
				sgx_dvfs_level = sec_clock_change_down(sgx_dvfs_level, BASE_QUICK_DOWN_LEVEL);
			else
#endif
				/* need to down current clock */
				sgx_dvfs_level = sec_clock_change_down(sgx_dvfs_level, BASE_DWON_STEP_LEVEL);
		} else if (g_gpu_dvfs_data[sgx_dvfs_level].max_threadhold < utilization_value) {
#if defined(USING_BOOST_UP_MODE)
			if (g_gpu_dvfs_data[sgx_dvfs_level].quick_up_threadhold <= utilization_value)
				sgx_dvfs_level = sec_clock_change_up(sgx_dvfs_level, BASE_QUICK_UP_LEVEL);
			else
#endif
				/* need to up current clock */
				sgx_dvfs_level = sec_clock_change_up(sgx_dvfs_level, BASE_UP_STEP_LEVEL);
		} else
			/* Within band: reset the countdown that gates a down-step. */
			sgx_dvfs_down_requirement = g_gpu_dvfs_data[sgx_dvfs_level].stay_total_count;
	}
	/* Export the (possibly new) clock for external consumers. */
	g_g3dfreq = g_gpu_dvfs_data[sgx_dvfs_level].clock;
}