/* Drop one reference on an import; on the final reference, unref the
 * underlying PMR and destroy the import structure and its locks.
 * Must only be called with a live reference held. */
IMG_INTERNAL IMG_VOID _DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
{
	IMG_BOOL bDestroy;

	PVR_ASSERT(psImport->ui32RefCount != 0);

	OSLockAcquire(psImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
	                      __FUNCTION__,
	                      psImport,
	                      psImport->ui32RefCount,
	                      psImport->ui32RefCount-1);
	psImport->ui32RefCount--;
	bDestroy = (psImport->ui32RefCount == 0) ? IMG_TRUE : IMG_FALSE;
	/* The lock must be released before teardown: it lives inside psImport
	 * and is destroyed below on the last reference. */
	OSLockRelease(psImport->hLock);

	if (bDestroy)
	{
		BridgePMRUnrefPMR(psImport->hBridge, psImport->hPMR);
		OSLockDestroy(psImport->sCPUImport.hLock);
		OSLockDestroy(psImport->sDeviceImport.hLock);
		OSLockDestroy(psImport->hLock);
		OSFreeMem(psImport);
	}
}
/* Drop one reference on a server sync primitive; on the final reference,
 * unlink it from the global sync list and free it. */
IMG_VOID ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync)
{
	IMG_UINT32 ui32Remaining;

	OSLockAcquire(psSync->hLock);
	ui32Remaining = --psSync->ui32RefCount;
	OSLockRelease(psSync->hLock);

	SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
	                    __FUNCTION__, psSync, ui32Remaining);

	if (ui32Remaining != 0)
	{
		return;
	}

	/* Remove the sync from the global list */
	OSLockAcquire(g_hListLock);
	dllist_remove_node(&psSync->sNode);
	OSLockRelease(g_hListLock);

	OSLockDestroy(psSync->hLock);
	SyncPrimFree(psSync->psSync);
	OSFreeMem(psSync);
}
/* Attach a sync primitive block to the per-connection sync data (if any).
 * Takes a reference on the connection data so it cannot be destroyed while
 * the block points at it; psBlock->psSyncConnectionData records the owner
 * (IMG_NULL when there is no connection). */
static IMG_VOID _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
{
	if (psConnection)
	{
		SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;

		/* Make sure the connection doesn't go away. It doesn't matter that we
		   will release the lock between as the refcount and list don't have to
		   be atomic w.r.t. to each other */
		_SyncConnectionRef(psSyncConnectionData);

		OSLockAcquire(psSyncConnectionData->hLock);
		/* FIX: removed a redundant 'psConnection != IMG_NULL' re-check here;
		 * this branch is already guarded by 'if (psConnection)' above, so the
		 * inner test could never be false. */
		dllist_add_to_head(&psSyncConnectionData->sListHead,
		                   &psBlock->sConnectionNode);
		OSLockRelease(psSyncConnectionData->hLock);
		psBlock->psSyncConnectionData = psSyncConnectionData;
	}
	else
	{
		psBlock->psSyncConnectionData = IMG_NULL;
	}
}
/* Drop one reference on a sync primitive block; the final reference detaches
 * it from its connection, releases its device memory and frees it. */
static IMG_VOID _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
{
	IMG_UINT32 ui32NewCount;

	OSLockAcquire(psSyncBlk->hLock);
	ui32NewCount = --psSyncBlk->ui32RefCount;
	OSLockRelease(psSyncBlk->hLock);

	if (ui32NewCount != 0)
	{
		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
		                    __FUNCTION__, psSyncBlk, ui32NewCount);
		return;
	}

	{
		/* Cache the device node: psSyncBlk is freed below but the node is
		 * still needed for the pfnFreeUFOBlock callback. */
		PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;

		SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
		                    __FUNCTION__, psSyncBlk, ui32NewCount);

		_SyncConnectionRemoveBlock(psSyncBlk);
		OSLockDestroy(psSyncBlk->hLock);
		DevmemUnexport(psSyncBlk->psMemDesc, &psSyncBlk->sExportCookie);
		DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
		psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
		OSFreeMem(psSyncBlk);
	}
}
/*!
******************************************************************************

 @Function	PVRSRVPowerUnlock

 @Description	Release the mutex for power transitions

 @Return	PVRSRV_ERROR

******************************************************************************/
/* FIX: declared the parameter list as (IMG_VOID). In C an empty () in a
 * definition means "unspecified parameters", not "no parameters", and does
 * not provide a checked prototype. */
IMG_EXPORT IMG_VOID PVRSRVPowerUnlock(IMG_VOID)
{
	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();

	OSLockRelease(psPVRSRVData->hPowerLock);
}
/* Unmap an import from the Device.
 *
 * Drops one device-mapping reference on psImport->sDeviceImport. On the last
 * reference: undoes the PMR mapping (if mapped), unreserves the VM range,
 * returns the device-virtual range to the heap RA, drops the import
 * reference, and decrements the heap's import count. */
IMG_INTERNAL IMG_VOID _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
{
	PVRSRV_ERROR eError;
	DEVMEM_DEVICE_IMPORT *psDeviceImport;

	psDeviceImport = &psImport->sDeviceImport;

	OSLockAcquire(psDeviceImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psImport, psDeviceImport->ui32RefCount, psDeviceImport->ui32RefCount-1);
	if (--psDeviceImport->ui32RefCount == 0)
	{
		/* Cache the heap pointer: _DevmemImportStructRelease() below may free
		 * psImport, after which psDeviceImport->psHeap must not be read. */
		DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;

		if (psDeviceImport->bMapped)
		{
			eError = BridgeDevmemIntUnmapPMR(psImport->hBridge, psDeviceImport->hMapping);
			PVR_ASSERT(eError == PVRSRV_OK);
		}

		eError = BridgeDevmemIntUnreserveRange(psImport->hBridge, psDeviceImport->hReservation);
		PVR_ASSERT(eError == PVRSRV_OK);

		RA_Free(psHeap->psQuantizedVMRA, psDeviceImport->sDevVAddr.uiAddr);

		/* Release the device-import lock before dropping the import
		 * reference: the lock is owned by psImport and would be destroyed by
		 * the release on the last reference. */
		OSLockRelease(psDeviceImport->hLock);
		_DevmemImportStructRelease(psImport);

		OSLockAcquire(psHeap->hLock);
		psHeap->uiImportCount--;
		OSLockRelease(psHeap->hLock);
	}
	else
	{
		OSLockRelease(psDeviceImport->hLock);
	}
}
/* Unmap an import from the CPU.
 *
 * Drops one CPU-mapping reference on psImport->sCPUImport; on the last
 * reference the CPU mapping is torn down (munmap for dma-buf imports in
 * user-mode ion builds, OSMUnmapPMR otherwise) and the import reference
 * taken at map time is dropped. */
IMG_INTERNAL IMG_VOID _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
{
	DEVMEM_CPU_IMPORT *psCPUImport;

	psCPUImport = &psImport->sCPUImport;

	OSLockAcquire(psCPUImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psImport, psCPUImport->ui32RefCount, psCPUImport->ui32RefCount-1);
	if (--psCPUImport->ui32RefCount == 0)
	{
#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
		/* 32-bit hosts: the unmap length below is narrowed to IMG_SIZE_T,
		 * so the import size must fit in 32 bits. */
		PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
#endif

#if !defined(__KERNEL__) && defined(SUPPORT_ION)
		if (psImport->sCPUImport.iDmaBufFd >= 0)
		{
			/* dma-buf imports were mapped directly via mmap(), so undo
			 * them the same way rather than through the bridge. */
			munmap(psCPUImport->hOSMMapData, psImport->uiSize);
		}
		else
#endif
		{
			OSMUnmapPMR(psImport->hBridge, psImport->hPMR, psCPUImport->hOSMMapData, psCPUImport->pvCPUVAddr, (IMG_SIZE_T)psImport->uiSize);
		}
		/* Release the lock before _DevmemImportStructRelease(): it may free
		 * psImport, which owns (and destroys) this very lock. */
		OSLockRelease(psCPUImport->hLock);
		_DevmemImportStructRelease(psImport);
	}
	else
	{
		OSLockRelease(psCPUImport->hLock);
	}
}
PVRSRV_ERROR PVRSRVSyncRecordAddKM( SYNC_RECORD_HANDLE * phRecord, SYNC_PRIMITIVE_BLOCK * hServerSyncPrimBlock, IMG_UINT32 ui32FwBlockAddr, IMG_UINT32 ui32SyncOffset, IMG_BOOL bServerSync, IMG_UINT32 ui32ClassNameSize, const IMG_CHAR *pszClassName) { struct SYNC_RECORD * psSyncRec; PVRSRV_ERROR eError = PVRSRV_OK; if (!phRecord) { return PVRSRV_ERROR_INVALID_PARAMS; } *phRecord = IMG_NULL; psSyncRec = OSAllocMem(sizeof(*psSyncRec)); if (!psSyncRec) { eError = PVRSRV_ERROR_OUT_OF_MEMORY; goto fail_alloc; } psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock; psSyncRec->ui32SyncOffset = ui32SyncOffset; psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT; if(pszClassName) { if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN) ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1; /* Copy over the class name annotation */ OSStringNCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); psSyncRec->szClassName[ui32ClassNameSize] = 0; } else { /* No class name annotation */ psSyncRec->szClassName[0] = 0; } OSLockAcquire(g_hSyncRecordListLock); dllist_add_to_head(&g_sSyncRecordList, &psSyncRec->sNode); OSLockRelease(g_hSyncRecordListLock); *phRecord = (SYNC_RECORD_HANDLE)psSyncRec; fail_alloc: return eError; }
/* PDump the state of every sync primitive block attached to this
 * connection, holding the connection lock across the walk. */
IMG_VOID SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData)
{
	OSLockAcquire(psSyncConnectionData->hLock);

	PDUMPCOMMENT("Dump client Sync Prim state");
	dllist_foreach_node(&psSyncConnectionData->sListHead,
	                    _PDumpSyncBlock,
	                    IMG_NULL);

	OSLockRelease(psSyncConnectionData->hLock);
}
/* Module teardown for the server sync code.
 *
 * Unregisters the debug-request notifier and destroys the global sync-list
 * lock; with full sync tracking enabled it also destroys every remaining
 * sync record, unregisters the record notifier and destroys the record-list
 * lock. NOTE(review): callers are presumed to guarantee no concurrent sync
 * activity at this point (g_hListLock is destroyed without being taken) —
 * confirm against the init/deinit call sites. */
IMG_VOID ServerSyncDeinit(IMG_VOID)
{
	PVRSRVUnregisterDbgRequestNotify(g_hNotify);

	OSLockDestroy(g_hListLock);

#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
	/* Free any records still on the list before tearing down its lock */
	OSLockAcquire(g_hSyncRecordListLock);
	dllist_foreach_node(&g_sSyncRecordList, _SyncRecordListDestroy, IMG_NULL);
	OSLockRelease(g_hSyncRecordListLock);

	PVRSRVUnregisterDbgRequestNotify(g_hSyncRecordNotify);
	OSLockDestroy(g_hSyncRecordListLock);
#endif
}
/* Take one reference on the per-connection sync data. */
static IMG_VOID _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
{
	IMG_UINT32 ui32NewCount;

	OSLockAcquire(psSyncConnectionData->hLock);
	ui32NewCount = ++psSyncConnectionData->ui32RefCount;
	OSLockRelease(psSyncConnectionData->hLock);

	SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
	                    __FUNCTION__, psSyncConnectionData, ui32NewCount);
}
/* Take one reference on a server sync primitive. */
IMG_VOID ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync)
{
	IMG_UINT32 ui32NewCount;

	OSLockAcquire(psSync->hLock);
	ui32NewCount = ++psSync->ui32RefCount;
	OSLockRelease(psSync->hLock);

	SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
	                    __FUNCTION__, psSync, ui32NewCount);
}
/* Take one reference on a sync primitive block. */
static IMG_VOID _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
{
	IMG_UINT32 ui32NewCount;

	OSLockAcquire(psSyncBlk->hLock);
	ui32NewCount = ++psSyncBlk->ui32RefCount;
	OSLockRelease(psSyncBlk->hLock);

	SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
	                    __FUNCTION__, psSyncBlk, ui32NewCount);
}
/* given the physical address of a page catalogue, searches for a corresponding
 * MMU context and if found, provides the caller details of the process.
 * Returns IMG_TRUE if a process is found. With page-fault debug enabled,
 * falls back to the history ring of recently unregistered memory contexts. */
IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
								RGXMEM_PROCESS_INFO *psInfo)
{
	RGX_FIND_MMU_CONTEXT sData;
	IMG_BOOL bRet = IMG_FALSE;

	sData.sPCAddress = sPCAddress;
	sData.psServerMMUContext = IMG_NULL;

	dllist_foreach_node(&psDevInfo->sMemoryContextList, _RGXFindMMUContext, &sData);

	if(sData.psServerMMUContext != IMG_NULL)
	{
		psInfo->uiPID = sData.psServerMMUContext->uiPID;
		OSStringNCopy(psInfo->szProcessName, sData.psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
		psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
		psInfo->bUnregistered = IMG_FALSE;
		bRet = IMG_TRUE;
	}
#if defined(SUPPORT_PAGE_FAULT_DEBUG)
	else
	{
		/* no active memory context found with the given PC address.
		 * Check the list of most recently freed memory contexts,
		 * walking the ring backwards from the newest entry.
		 */
		IMG_UINT32 ui32Step;

		OSLockAcquire(psDevInfo->hMMUCtxUnregLock);

		/* FIX: the previous loop read gasUnregisteredMemCtxs[HISTORY_SIZE]
		 * (one past the array end) when the head was 0, and the unsigned
		 * 'i--' underflowed past 0 instead of wrapping — both out-of-bounds
		 * accesses. Use explicit modular arithmetic over every slot. */
		for (ui32Step = 1; ui32Step <= UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; ui32Step++)
		{
			IMG_UINT32 i = (gui32UnregisteredMemCtxsHead +
			                UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - ui32Step) %
			               UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE;
			UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i];

			if(psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
			{
				psInfo->uiPID = psRecord->uiPID;
				OSStringNCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)-1);
				psInfo->szProcessName[sizeof(psInfo->szProcessName) - 1] = '\0';
				psInfo->bUnregistered = IMG_TRUE;
				bRet = IMG_TRUE;
				break;
			}
		}

		OSLockRelease(psDevInfo->hMMUCtxUnregLock);
	}
#endif
	return bRet;
}
/* Take one reference on an import. */
IMG_INTERNAL IMG_VOID _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
{
	OSLockAcquire(psImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
	                      __FUNCTION__,
	                      psImport,
	                      psImport->ui32RefCount,
	                      psImport->ui32RefCount+1);
	++psImport->ui32RefCount;
	OSLockRelease(psImport->hLock);
}
/* Take one reference on a memory descriptor. */
IMG_INTERNAL IMG_VOID _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
{
	OSLockAcquire(psMemDesc->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
	                      __FUNCTION__,
	                      psMemDesc,
	                      psMemDesc->ui32RefCount,
	                      psMemDesc->ui32RefCount+1);
	++psMemDesc->ui32RefCount;
	OSLockRelease(psMemDesc->hLock);
}
/* Drop one reference on a memory descriptor; on the final reference the
 * backing is returned and the descriptor destroyed.
 *
 * Sub-allocated (non-exportable) descriptors return their range to the
 * heap's sub-allocation RA; exportable ones drop their import reference
 * directly. NOTE(review): for the RA_Free path the import reference is
 * presumably dropped via the RA's free callback rather than here — confirm
 * against the RA setup in this file. */
IMG_INTERNAL IMG_VOID _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
{
	PVR_ASSERT(psMemDesc != NULL);
	PVR_ASSERT(psMemDesc->ui32RefCount != 0);

	OSLockAcquire(psMemDesc->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psMemDesc, psMemDesc->ui32RefCount, psMemDesc->ui32RefCount-1);
	if (--psMemDesc->ui32RefCount == 0)
	{
		/* Release the lock before teardown: it is owned by psMemDesc and is
		 * destroyed below. */
		OSLockRelease(psMemDesc->hLock);

		if (!psMemDesc->psImport->bExportable)
		{
			/* Sub-allocation: free the device-virtual range (import base +
			 * this descriptor's offset) back to the sub-alloc RA. */
			RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA, psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr + psMemDesc->uiOffset);
		}
		else
		{
			_DevmemImportStructRelease(psMemDesc->psImport);
		}

		OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
		OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
		OSLockDestroy(psMemDesc->hLock);
		OSFreeMem(psMemDesc);
	}
	else
	{
		OSLockRelease(psMemDesc->hLock);
	}
}
/* Detach a sync primitive block from its connection's block list (if it is
 * attached) and drop the reference taken when it was added. */
static IMG_VOID _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
{
	SYNC_CONNECTION_DATA *psConnData = psBlock->psSyncConnectionData;

	if (psConnData == IMG_NULL)
	{
		return;
	}

	OSLockAcquire(psConnData->hLock);
	dllist_remove_node(&psBlock->sConnectionNode);
	OSLockRelease(psConnData->hLock);

	_SyncConnectionUnref(psConnData);
}
/* Debug-request callback: at high verbosity, dump every entry on the global
 * sync record list.
 *
 * NOTE: the local 'pfnDumpDebugPrintf' must keep this exact name — the
 * PVR_DUMPDEBUG_LOG macro presumably expands to reference it; confirm
 * against the macro definition. */
static IMG_VOID _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, IMG_UINT32 ui32VerbLevel)
{
	DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf = IMG_NULL;

	PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);

	/* Snapshot the current debug printf hook for PVR_DUMPDEBUG_LOG */
	pfnDumpDebugPrintf = g_pfnDumpDebugPrintf;

	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
	{
		PVR_DUMPDEBUG_LOG(("Dumping all sync records"));
		OSLockAcquire(g_hSyncRecordListLock);
		dllist_foreach_node(&g_sSyncRecordList, _SyncRecordPrint, IMG_NULL);
		OSLockRelease(g_hSyncRecordListLock);
	}
}
/* record a device memory context being unregistered.
 * the list of unregistered contexts can be used to find the PID and process name
 * belonging to a memory context which has been destroyed.
 * Entries live in the gasUnregisteredMemCtxs ring, whose head index
 * advances with a power-of-two mask. */
static IMG_VOID _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
{
	UNREGISTERED_MEMORY_CONTEXT *psRecord;

	OSLockAcquire(psDevInfo->hMMUCtxUnregLock);

	psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
	/* Ring advance assumes UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE is a
	 * power of two (mask arithmetic). */
	gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
					& (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);

	/* FIX: populate the record while still holding hMMUCtxUnregLock.
	 * Previously the lock was dropped after claiming the slot, so a
	 * concurrent unregister (or a reader such as RGXPCAddrToProcessInfo)
	 * could race with these writes and see or produce a torn record. */
	psRecord->uiPID = psServerMMUContext->uiPID;
	MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr);
	OSStringNCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
	psRecord->szProcessName[sizeof(psRecord->szProcessName) - 1] = '\0';

	OSLockRelease(psDevInfo->hMMUCtxUnregLock);
}
/* SCPRun
 *
 * Drain the software command processor's CCB: starting at the dependency
 * offset, run each command whose dependencies are met, stopping at the
 * first command that is not yet ready or when the write offset is reached.
 */
IMG_EXPORT PVRSRV_ERROR SCPRun(SCP_CONTEXT *psContext)
{
	if (psContext == IMG_NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	OSLockAcquire(psContext->hLock);

	for (;;)
	{
		SCP_COMMAND *psCommand;
		PVRSRV_ERROR eError;

		/* CCB fully drained? */
		if (psContext->ui32DepOffset == psContext->ui32WriteOffset)
		{
			break;
		}

		psCommand = (SCP_COMMAND *)((IMG_UINT8 *)psContext->pvCCB +
		                            psContext->ui32DepOffset);

		/* See if the command is ready to go */
		eError = _SCPCommandReady(psCommand);

		SCP_DEBUG_PRINT("%s: Processes command %p for ctx %p (%d)",
		                __FUNCTION__, psCommand, psContext, eError);

		/* As soon as we hit a command that can't run, stop draining */
		if (eError != PVRSRV_OK)
		{
			break;
		}

		/* processed cmd so update queue, then run the command */
		UPDATE_CCB_OFFSET(psContext->ui32DepOffset,
		                  psCommand->ui32CmdSize,
		                  psContext->ui32CCBSize);
		_SCPCommandDo(psCommand);
	}

	OSLockRelease(psContext->hLock);
	return PVRSRV_OK;
}
/* Unlink a sync tracking record from the global list and free it.
 * Returns PVRSRV_ERROR_INVALID_PARAMS for a NULL handle. */
PVRSRV_ERROR PVRSRVSyncRecordRemoveByHandleKM(SYNC_RECORD_HANDLE hRecord)
{
	struct SYNC_RECORD *psSyncRec = (struct SYNC_RECORD *)hRecord;

	if (psSyncRec == IMG_NULL)
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	OSLockAcquire(g_hSyncRecordListLock);
	dllist_remove_node(&psSyncRec->sNode);
	OSLockRelease(g_hSyncRecordListLock);

	OSFreeMem(psSyncRec);

	return PVRSRV_OK;
}
/* Drop one reference on the per-connection sync data; the final reference
 * requires the block list to be empty and frees the structure. */
static IMG_VOID _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
{
	IMG_UINT32 ui32NewCount;

	OSLockAcquire(psSyncConnectionData->hLock);
	ui32NewCount = --psSyncConnectionData->ui32RefCount;
	OSLockRelease(psSyncConnectionData->hLock);

	SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
	                    __FUNCTION__, psSyncConnectionData, ui32NewCount);

	if (ui32NewCount == 0)
	{
		/* All blocks must have been removed before the last unref */
		PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
		OSLockDestroy(psSyncConnectionData->hLock);
		OSFreeMem(psSyncConnectionData);
	}
}
/* Queue a software operation on a server sync.
 *
 * Takes a reference on the sync, reserves the next fence/update values, and
 * tells the caller whether it must emit a fence command (i.e. whether the
 * previous operation came from a different sync requester). Also tracks the
 * HW->SW transition so PDump can later POL on the last HW update.
 * NOTE(review): the reference taken by ServerSyncRef() here is presumably
 * dropped when the queued SW operation completes — confirm at the
 * completion path. */
PVRSRV_ERROR PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32FenceValue, IMG_UINT32 *pui32UpdateValue, IMG_UINT32 ui32SyncRequesterID, IMG_BOOL bUpdate, IMG_BOOL *pbFenceRequired)
{
	ServerSyncRef(psSync);

	/*
		ServerSyncRef will acquire and release the lock but we need to
		reacquire here to ensure the state that we're modifying below will be
		consistent with itself. But it doesn't matter if another thread
		acquires the lock in between as we've ensured the sync wont go away
	*/
	OSLockAcquire(psSync->hLock);
	_ServerSyncTakeOperation(psSync, bUpdate, pui32FenceValue, pui32UpdateValue);

	/*
		The caller want to know if a fence command is required
		i.e. was the last operation done on this sync done by the
		the same sync requestor
	*/
	if (pbFenceRequired)
	{
		if (ui32SyncRequesterID == psSync->ui32LastSyncRequesterID)
		{
			*pbFenceRequired = IMG_FALSE;
		}
		else
		{
			*pbFenceRequired = IMG_TRUE;
		}
	}

	/*
		If we're transitioning from a HW operation to a SW operation we
		need to save the last update the HW will do so that when we PDump
		we can issue a POL for it before the next HW operation and then
		LDB in the last SW fence update
	*/
	if (psSync->bSWOperation == IMG_FALSE)
	{
		psSync->bSWOperation = IMG_TRUE;
		psSync->ui32LastHWUpdate = *pui32FenceValue;
		/* Record whether the SW phase started inside a capture range */
		PDumpIsCaptureFrameKM(&psSync->bSWOpStartedInCaptRange);
	}

	if (pbFenceRequired)
	{
		if (*pbFenceRequired)
		{
			SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
		}
	}

	/* Only update the last requester id if we are make changes to this sync
	 * object. */
	if (bUpdate)
		psSync->ui32LastSyncRequesterID = ui32SyncRequesterID;

	OSLockRelease(psSync->hLock);

	return PVRSRV_OK;
}
PVRSRV_ERROR PVRSRVServerSyncAllocKM(PVRSRV_DEVICE_NODE *psDevNode, SERVER_SYNC_PRIMITIVE **ppsSync, IMG_UINT32 *pui32SyncPrimVAddr, IMG_UINT32 ui32ClassNameSize, const IMG_CHAR *pszClassName) { SERVER_SYNC_PRIMITIVE *psNewSync; PVRSRV_ERROR eError; psNewSync = OSAllocMem(sizeof(SERVER_SYNC_PRIMITIVE)); if (psNewSync == IMG_NULL) { return PVRSRV_ERROR_OUT_OF_MEMORY; } /* szClassName must be setup now and used for the SyncPrimAlloc call because * pszClassName is allocated in the bridge code is not NULL terminated */ if(pszClassName) { if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN) ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN - 1; /* Copy over the class name annotation */ OSStringNCopy(psNewSync->szClassName, pszClassName, ui32ClassNameSize); psNewSync->szClassName[ui32ClassNameSize] = 0; } else { /* No class name annotation */ psNewSync->szClassName[0] = 0; } eError = SyncPrimAlloc(psDevNode->hSyncPrimContext, &psNewSync->psSync, psNewSync->szClassName); if (eError != PVRSRV_OK) { goto fail_sync_alloc; } eError = OSLockCreate(&psNewSync->hLock, LOCK_TYPE_NONE); if (eError != PVRSRV_OK) { goto fail_lock_create; } SyncPrimSet(psNewSync->psSync, 0); psNewSync->ui32NextOp = 0; psNewSync->ui32RefCount = 1; psNewSync->ui32UID = g_ServerSyncUID++; psNewSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN; psNewSync->bSWOperation = IMG_FALSE; psNewSync->ui32LastHWUpdate = 0x0bad592c; psNewSync->bPDumped = IMG_FALSE; /* Add the sync to the global list */ OSLockAcquire(g_hListLock); dllist_add_to_head(&g_sAllServerSyncs, &psNewSync->sNode); OSLockRelease(g_hListLock); *pui32SyncPrimVAddr = SyncPrimGetFirmwareAddr(psNewSync->psSync); SYNC_UPDATES_PRINT("%s: sync: %p, fwaddr: %8.8X", __FUNCTION__, psNewSync, *pui32SyncPrimVAddr); *ppsSync = psNewSync; return PVRSRV_OK; fail_lock_create: SyncPrimFree(psNewSync->psSync); fail_sync_alloc: OSFreeMem(psNewSync); return eError; }
/* Map an import into the CPU.
 *
 * Takes one CPU-mapping reference; the first reference also takes an import
 * reference and creates the actual CPU mapping (direct mmap() for dma-buf
 * imports on user-mode ion builds, OSMMapPMR otherwise). */
IMG_INTERNAL PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
{
	PVRSRV_ERROR eError;
	DEVMEM_CPU_IMPORT *psCPUImport;
	IMG_SIZE_T uiMappingLength;

	psCPUImport = &psImport->sCPUImport;

	OSLockAcquire(psCPUImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psImport, psCPUImport->ui32RefCount, psCPUImport->ui32RefCount+1);
	if (psCPUImport->ui32RefCount++ == 0)
	{
		_DevmemImportStructAcquire(psImport);

#if !defined(__KERNEL__) && defined(SUPPORT_ION)
		if (psImport->sCPUImport.iDmaBufFd >= 0)
		{
			void *pvCPUVAddr;

			/* For ion imports, use the ion fd and mmap facility to map the
			 * buffer to user space. We can bypass the services bridge in
			 * this case and possibly save some time. */
			pvCPUVAddr = mmap(NULL, psImport->uiSize, PROT_READ | PROT_WRITE,
			                  MAP_SHARED, psImport->sCPUImport.iDmaBufFd, 0);
			if (pvCPUVAddr == MAP_FAILED)
			{
				eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
				goto failMap;
			}

			psCPUImport->hOSMMapData = pvCPUVAddr;
			psCPUImport->pvCPUVAddr = pvCPUVAddr;
			uiMappingLength = psImport->uiSize;
		}
		else
#endif
		{
			eError = OSMMapPMR(psImport->hBridge,
			                   psImport->hPMR,
			                   psImport->uiSize,
			                   &psCPUImport->hOSMMapData,
			                   &psCPUImport->pvCPUVAddr,
			                   &uiMappingLength);
			if (eError != PVRSRV_OK)
			{
				goto failMap;
			}
		}

		/* There is no reason the mapping length is different to the size */
		PVR_ASSERT(uiMappingLength == psImport->uiSize);
	}
	OSLockRelease(psCPUImport->hLock);

	return PVRSRV_OK;

failMap:
	psCPUImport->ui32RefCount--;
	/* FIX: release the CPU-import lock BEFORE dropping the import reference.
	 * _DevmemImportStructRelease() may free psImport — which owns and
	 * destroys sCPUImport.hLock — so the previous order could unlock a
	 * destroyed lock / freed memory. This also matches the ordering used by
	 * _DevmemImportStructCPUUnmap(). */
	OSLockRelease(psCPUImport->hLock);
	_DevmemImportStructRelease(psImport);
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
/* Queue a hardware operation on a server sync.
 *
 * Reserves the next fence/update values and, if the sync was last used by
 * SW operations, emits the PDump POL/LDB sequence needed to keep captures
 * coherent across the SW->HW transition. Unlike the SW path, no reference
 * is taken here: the caller must guarantee the operation completes before
 * the sync is freed (see comment below). */
PVRSRV_ERROR PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync, IMG_BOOL bUpdate, IMG_UINT32 *pui32FenceValue, IMG_UINT32 *pui32UpdateValue)
{
	/*
		For HW operations the client is required to ensure the
		operation has completed before freeing the sync as we
		no way of dropping the refcount if we where to acquire it
		here.

		Take the lock to ensure the state that we're modifying below
		will be consistent with itself.
	*/
	OSLockAcquire(psSync->hLock);
	_ServerSyncTakeOperation(psSync, bUpdate, pui32FenceValue, pui32UpdateValue);

	/*
		Note:

		We might want to consider optimising the fences that we write for
		HW operations but for now just clear it back to unknown
	*/
	psSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;

	if (psSync->bSWOperation)
	{
		IMG_CHAR azTmp[256];
		OSSNPrintf(azTmp, sizeof(azTmp), "Wait for HW ops and dummy update for SW ops (0x%p, FW VAddr = 0x%08x, value = 0x%08x)\n", psSync, SyncPrimGetFirmwareAddr(psSync->psSync), *pui32FenceValue);
		PDumpCommentKM(azTmp, 0);

		if (psSync->bSWOpStartedInCaptRange)
		{
			/* Dump a POL for the previous HW operation */
			SyncPrimPDumpPol(psSync->psSync, psSync->ui32LastHWUpdate, 0xffffffff, PDUMP_POLL_OPERATOR_EQUAL, 0);
		}

		/* Dump the expected value (i.e. the value after all the SW operations) */
		SyncPrimPDumpValue(psSync->psSync, *pui32FenceValue);

		/* Reset the state as we've just done a HW operation */
		psSync->bSWOperation = IMG_FALSE;
	}
	OSLockRelease(psSync->hLock);

	SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);

	return PVRSRV_OK;
}
/* Map an import to the device.
 *
 * Takes one device-mapping reference; the first reference also takes an
 * import reference, charges the heap's import count, allocates device VM,
 * reserves the range and (optionally, bMap) maps the PMR into it. A repeat
 * mapping must target the same heap. */
IMG_INTERNAL PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, IMG_BOOL bMap, DEVMEM_IMPORT *psImport)
{
	DEVMEM_DEVICE_IMPORT *psDeviceImport;
	IMG_BOOL bStatus;
	RA_BASE_T uiAllocatedAddr;
	RA_LENGTH_T uiAllocatedSize;
	IMG_DEV_VIRTADDR sBase;
	IMG_HANDLE hReservation;
	PVRSRV_ERROR eError;

	psDeviceImport = &psImport->sDeviceImport;

	OSLockAcquire(psDeviceImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", __FUNCTION__, psImport, psDeviceImport->ui32RefCount, psDeviceImport->ui32RefCount+1);
	if (psDeviceImport->ui32RefCount++ == 0)
	{
		_DevmemImportStructAcquire(psImport);

		OSLockAcquire(psHeap->hLock);
		psHeap->uiImportCount++;
		OSLockRelease(psHeap->hLock);

		if (psHeap->psCtx->hBridge != psImport->hBridge)
		{
			/* The import was done with a different connection then the
			 * memory context which means they are not compatible. */
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto failCheck;
		}

		/* Allocate space in the VM */
		bStatus = RA_Alloc(psHeap->psQuantizedVMRA,
		                   psImport->uiSize,
		                   0,				/* flags: this RA doesn't use flags */
		                   psImport->uiAlign,
		                   &uiAllocatedAddr,
		                   &uiAllocatedSize,
		                   IMG_NULL);			/* don't care about per-import priv data */
		if (!bStatus)
		{
			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
			goto failVMRAAlloc;
		}

		/* No reason for the allocated virtual size to be different from
		   the PMR's size */
		PVR_ASSERT(uiAllocatedSize == psImport->uiSize);

		sBase.uiAddr = uiAllocatedAddr;

		/* Setup page tables for the allocated VM space */
		eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hBridge,
		                                     psHeap->hDevMemServerHeap,
		                                     sBase,
		                                     uiAllocatedSize,
		                                     &hReservation);
		if (eError != PVRSRV_OK)
		{
			goto failReserve;
		}

		if (bMap)
		{
			DEVMEM_FLAGS_T uiMapFlags;

			uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;

			/* Actually map the PMR to allocated VM space */
			eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hBridge,
			                               psHeap->hDevMemServerHeap,
			                               hReservation,
			                               psImport->hPMR,
			                               uiMapFlags,
			                               &psDeviceImport->hMapping);
			if (eError != PVRSRV_OK)
			{
				goto failMap;
			}
			psDeviceImport->bMapped = IMG_TRUE;
		}

		/* Setup device mapping specific parts of the mapping info */
		psDeviceImport->hReservation = hReservation;
		psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
		psDeviceImport->psHeap = psHeap;
	}
	else
	{
		/* Check that we've been asked to map it into the same heap 2nd
		   time around */
		if (psHeap != psDeviceImport->psHeap)
		{
			eError = PVRSRV_ERROR_INVALID_HEAP;
			goto failParams;
		}
	}
	OSLockRelease(psDeviceImport->hLock);

	return PVRSRV_OK;

failMap:
	BridgeDevmemIntUnreserveRange(psHeap->psCtx->hBridge, hReservation);
failReserve:
	RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
failVMRAAlloc:
failCheck:
	OSLockAcquire(psHeap->hLock);
	psHeap->uiImportCount--;
	OSLockRelease(psHeap->hLock);
	/* FIX: undo the device-mapping reference taken at entry — previously it
	 * was leaked on every error path. */
	psDeviceImport->ui32RefCount--;
	/* FIX: release hLock BEFORE _DevmemImportStructRelease(): the release
	 * may free psImport and destroy this very lock, so the old ordering
	 * (release-then-unlock, at failParams) could unlock freed memory. */
	OSLockRelease(psDeviceImport->hLock);
	_DevmemImportStructRelease(psImport);
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;

failParams:
	/* Heap mismatch on a repeat mapping: no import reference or heap charge
	 * was taken on this path, but the refcount increment must be undone. */
	psDeviceImport->ui32RefCount--;
	OSLockRelease(psDeviceImport->hLock);
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}