IMG_VOID RGXDestroyCCB(RGX_CLIENT_CCB *psClientCCB)
{
    PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
    DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
    DevmemFwFree(psClientCCB->psClientCCBCtrlMemDesc);
    DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
    DevmemFwFree(psClientCCB->psClientCCBMemDesc);
    OSFreeMem(psClientCCB);
}
static IMG_VOID _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
{
    IMG_UINT32 ui32RefCount;

    OSLockAcquire(psSyncBlk->hLock);
    ui32RefCount = --psSyncBlk->ui32RefCount;
    OSLockRelease(psSyncBlk->hLock);

    if (ui32RefCount == 0)
    {
        PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;

        SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
                            __FUNCTION__, psSyncBlk, ui32RefCount);

        _SyncConnectionRemoveBlock(psSyncBlk);
        OSLockDestroy(psSyncBlk->hLock);
        DevmemUnexport(psSyncBlk->psMemDesc, &psSyncBlk->sExportCookie);
        DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
        psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
        OSFreeMem(psSyncBlk);
    }
    else
    {
        SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
                            __FUNCTION__, psSyncBlk, ui32RefCount);
    }
}
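/*
 * Hedged sketch (not part of the original excerpt): the matching reference
 * increment implied by _SyncPrimitiveBlockUnref() above and by the
 * _SyncPrimitiveBlockRef() calls in PVRSRVAllocSyncPrimitiveBlockKM(). The
 * real implementation may differ; this only illustrates the lock-protected
 * counter that the unref path decrements.
 */
static IMG_VOID _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
{
    IMG_UINT32 ui32RefCount;

    OSLockAcquire(psSyncBlk->hLock);
    ui32RefCount = ++psSyncBlk->ui32RefCount;
    OSLockRelease(psSyncBlk->hLock);

    SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (add)",
                        __FUNCTION__, psSyncBlk, ui32RefCount);
}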
IMG_VOID TLStreamClose(IMG_HANDLE hStream)
{
    PTL_STREAM psTmp;

    PVR_DPF_ENTERED;

    if ( IMG_NULL == hStream )
    {
        PVR_DPF((PVR_DBG_WARNING,
                 "TLStreamClose failed as NULL stream handle passed, nothing done.\n"));
        PVR_DPF_RETURN;
    }

    psTmp = (PTL_STREAM)hStream;

    /* Decrement reference counter */
    //Thread Safety: Not yet implemented
    OSLockAcquire(psTmp->hLock);
    psTmp->uiRefCount--;
    OSLockRelease(psTmp->hLock);

    /* The stream is still being used in other context(s); do not destroy anything */
    if ( 0 != psTmp->uiRefCount )
    {
        PVR_DPF_RETURN;
    }
    else
    {
        if ( psTmp->bWaitForEmptyOnDestroy == IMG_TRUE )
        {
            while (psTmp->ui32Read != psTmp->ui32Write)
            {
                OSEventObjectWaitTimeout(psTmp->hProducerEvent,
                                         EVENT_OBJECT_TIMEOUT_MS);
            }
        }

        /* First remove it from the global structures to prevent access
         * while it is being freed. Lock it? */
        TLRemoveStreamAndTryFreeStreamNode(psTmp->psNode);

        //Thread Safety: Not yet implemented
        OSLockDestroy(psTmp->hLock);

        /* In block-while-reserve streams these will not be NULL */
        if ( IMG_TRUE == psTmp->bBlock )
        {
            OSEventObjectClose(psTmp->hProducerEvent);
            OSEventObjectDestroy(psTmp->hProducerEventObj);
        }

        DevmemUnexport(psTmp->psStreamMemDesc, &psTmp->sExportCookie);
        DevmemReleaseCpuVirtAddr(psTmp->psStreamMemDesc);
        DevmemFree(psTmp->psStreamMemDesc);

        OSFREEMEM(psTmp);
        PVR_DPF_RETURN;
    }
}
IMG_INTERNAL
PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
                                 IMG_HANDLE hSD)
{
    PVRSRV_ERROR eError = PVRSRV_OK;
    TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;

    PVR_ASSERT(hSrvHandle);
    PVR_ASSERT(hSD);

    /* Check the caller provided connection is valid */
    if (!psSD->hServerSD)
    {
        PVR_DPF((PVR_DBG_ERROR, "TLClientCloseStream: descriptor already closed/not open"));
        return PVRSRV_ERROR_HANDLE_NOT_FOUND;
    }

    /* Check if an acquire is outstanding and perform the release if it is.
     * Ignore the result, as there is not much we can do on error other than close. */
    if (psSD->uiReadLen != NO_ACQUIRE)
    {
        (void) BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
                                   psSD->uiReadOffset, psSD->uiReadLen);
        psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
    }

    /* Clean up DevMem resources used for this stream in this client */
    DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
    DevmemFree(psSD->psUMmemDesc);

    /* Ignore error, not much that can be done */
    (void) DevmemUnmakeServerExportClientExport(hSrvHandle, &psSD->sExportCookie);

    /* Send close to the server to clean up kernel-mode resources for this
     * handle and release the memory. */
    eError = BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "BridgeTLCloseStream: KM returned %d", eError));
        /* Not much we can do with the error; fall through to clean up */
    }

    OSMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
    OSFreeMem(psSD);

    return eError;
}
PVRSRV_ERROR RGXCreateCCB(PVRSRV_DEVICE_NODE *psDeviceNode,
                          IMG_UINT32 ui32CCBSizeLog2,
                          CONNECTION_DATA *psConnectionData,
                          const IMG_CHAR *pszName,
                          RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
                          RGX_CLIENT_CCB **ppsClientCCB,
                          DEVMEM_MEMDESC **ppsClientCCBMemDesc,
                          DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc)
{
    PVRSRV_ERROR eError;
    DEVMEM_FLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
    IMG_UINT32 ui32AllocSize = (1U << ui32CCBSizeLog2);
    RGX_CLIENT_CCB *psClientCCB;

    psClientCCB = OSAllocMem(sizeof(*psClientCCB));
    if (psClientCCB == IMG_NULL)
    {
        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
        goto fail_alloc;
    }
    psClientCCB->psServerCommonContext = psServerCommonContext;

    uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
                               PVRSRV_MEMALLOCFLAG_GPU_READABLE |
                               PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
                               PVRSRV_MEMALLOCFLAG_CPU_READABLE |
                               PVRSRV_MEMALLOCFLAG_UNCACHED |
                               PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
                               PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

    uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
                                  PVRSRV_MEMALLOCFLAG_GPU_READABLE |
                                  PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
                                  PVRSRV_MEMALLOCFLAG_CPU_READABLE |
                                  PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
                                  PVRSRV_MEMALLOCFLAG_UNCACHED |
                                  PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
                                  PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

    PDUMPCOMMENT("Allocate RGXFW cCCB");
    eError = DevmemFwAllocateExportable(psDeviceNode,
                                        ui32AllocSize,
                                        uiClientCCBMemAllocFlags,
                                        "FirmwareClientCCB",
                                        &psClientCCB->psClientCCBMemDesc);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
                PVRSRVGetErrorStringKM(eError)));
        goto fail_alloc_ccb;
    }

    eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
                                      (IMG_VOID **) &psClientCCB->pui8ClientCCB);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
                PVRSRVGetErrorStringKM(eError)));
        goto fail_map_ccb;
    }

    PDUMPCOMMENT("Allocate RGXFW cCCB control");
    eError = DevmemFwAllocateExportable(psDeviceNode,
                                        sizeof(RGXFWIF_CCCB_CTL),
                                        uiClientCCBCtlMemAllocFlags,
                                        "FirmwareClientCCBControl",
                                        &psClientCCB->psClientCCBCtrlMemDesc);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
                PVRSRVGetErrorStringKM(eError)));
        goto fail_alloc_ccbctrl;
    }

    eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
                                      (IMG_VOID **) &psClientCCB->psClientCCBCtrl);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB control (%s)",
                PVRSRVGetErrorStringKM(eError)));
        goto fail_map_ccbctrl;
    }

    psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
    psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
    psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
    psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
    OSStringCopy(psClientCCB->szName, pszName);

    PDUMPCOMMENT("cCCB control");
    DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
                       0,
                       sizeof(RGXFWIF_CCCB_CTL),
                       PDUMP_FLAGS_CONTINUOUS);
    PVR_ASSERT(eError == PVRSRV_OK);

    psClientCCB->ui32HostWriteOffset = 0;
    psClientCCB->ui32LastPDumpWriteOffset = 0;
    psClientCCB->ui32Size = ui32AllocSize;

#if defined REDUNDANT_SYNCS_DEBUG
    psClientCCB->ui32UpdateWriteIndex = 0;
    OSMemSet(psClientCCB->asFenceUpdateList, 0, sizeof(psClientCCB->asFenceUpdateList));
#endif

    eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
                                             _RGXCCBPDumpTransition,
                                             psClientCCB,
                                             &psClientCCB->hTransition);
    if (eError != PVRSRV_OK)
    {
        goto fail_pdumpreg;
    }

    /* Note: Due to resman the connection structure could be freed before the
     * client CCB, so rather than saving off the connection structure, save the
     * PDump-specific memory, which is refcounted to ensure it's not freed too
     * early. */
    psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
    PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
                 psClientCCB->szName,
                 psClientCCB);

    *ppsClientCCB = psClientCCB;
    *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
    *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
    return PVRSRV_OK;

fail_pdumpreg:
    DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
fail_map_ccbctrl:
    DevmemFwFree(psClientCCB->psClientCCBCtrlMemDesc);
fail_alloc_ccbctrl:
    DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
fail_map_ccb:
    DevmemFwFree(psClientCCB->psClientCCBMemDesc);
fail_alloc_ccb:
    OSFreeMem(psClientCCB);
fail_alloc:
    PVR_ASSERT(eError != PVRSRV_OK);
    return eError;
}
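/*
 * Hedged usage sketch (not part of the original source): creating and
 * destroying a client CCB with the two functions above. The device node,
 * connection data, and server common context are assumed to have been set
 * up elsewhere; the log2 size of 16 (64KB) is purely illustrative.
 */
static PVRSRV_ERROR _ExampleClientCCBLifetime(PVRSRV_DEVICE_NODE *psDeviceNode,
                                              CONNECTION_DATA *psConnectionData,
                                              RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
{
    RGX_CLIENT_CCB *psClientCCB;
    DEVMEM_MEMDESC *psClientCCBMemDesc;
    DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
    PVRSRV_ERROR eError;

    /* The CCB size must be a power of two: ui32WrapMask is (size - 1) */
    eError = RGXCreateCCB(psDeviceNode,
                          16,                   /* ui32CCBSizeLog2, illustrative */
                          psConnectionData,
                          "ExampleCCB",
                          psServerCommonContext,
                          &psClientCCB,
                          &psClientCCBMemDesc,
                          &psClientCCBCtrlMemDesc);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* ... submit commands through the CCB ... */

    RGXDestroyCCB(psClientCCB);
    return PVRSRV_OK;
}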
PVRSRV_ERROR
PVRSRVAllocSyncPrimitiveBlockKM(PVRSRV_DEVICE_NODE *psDevNode,
                                SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
                                IMG_UINT32 *puiSyncPrimVAddr,
                                IMG_UINT32 *puiSyncPrimBlockSize,
                                DEVMEM_EXPORTCOOKIE **psExportCookie)
{
    SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
    PVRSRV_ERROR eError;

    psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
    if (psNewSyncBlk == IMG_NULL)
    {
        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
        goto e0;
    }
    psNewSyncBlk->psDevNode = psDevNode;

    PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");

    eError = psDevNode->pfnAllocUFOBlock(psDevNode,
                                         &psNewSyncBlk->psMemDesc,
                                         puiSyncPrimVAddr,
                                         &psNewSyncBlk->ui32BlockSize);
    if (eError != PVRSRV_OK)
    {
        goto e1;
    }

    eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
                                      (IMG_PVOID *) &psNewSyncBlk->pui32LinAddr);
    if (eError != PVRSRV_OK)
    {
        goto e2;
    }

    eError = DevmemExport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
    if (eError != PVRSRV_OK)
    {
        goto e3;
    }

    psNewSyncBlk->ui32RefCount = 0;
    _SyncPrimitiveBlockRef(psNewSyncBlk);

    *psExportCookie = &psNewSyncBlk->sExportCookie;
    *ppsSyncBlk = psNewSyncBlk;
    *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;

    PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
                          "Allocated UFO block (FirmwareVAddr = 0x%08x)",
                          *puiSyncPrimVAddr);

    return PVRSRV_OK;

    /* Unwind in reverse order of setup: release the CPU mapping before the
     * UFO block that backs it is freed */
e3:
    DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
e2:
    psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
e1:
    OSFreeMem(psNewSyncBlk);
e0:
    return eError;
}
/*
 * RGXRegisterMemoryContext
 */
PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode,
                                      MMU_CONTEXT *psMMUContext,
                                      IMG_HANDLE *hPrivData)
{
    PVRSRV_ERROR eError;
    PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
    DEVMEM_FLAGS_T uiFWMemContextMemAllocFlags;
    RGXFWIF_FWMEMCONTEXT *psFWMemContext;
    DEVMEM_MEMDESC *psFWMemContextMemDesc;
    SERVER_MMU_CONTEXT *psServerMMUContext;

    if (psDevInfo->psKernelMMUCtx == IMG_NULL)
    {
        /*
         * This must be the creation of the Kernel memory context. Take a copy
         * of the MMU context for use when programming the BIF.
         */
        psDevInfo->psKernelMMUCtx = psMMUContext;
    }
    else
    {
        psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
        if (psServerMMUContext == IMG_NULL)
        {
            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
            goto fail_alloc_server_ctx;
        }

        psServerMMUContext->psDevInfo = psDevInfo;

        /*
         * This FW MemContext is only mapped into the kernel for initialisation
         * purposes; otherwise the allocation is only used by the FW.
         * Therefore the GPU cache doesn't need coherency, and write-combine
         * suffices on the CPU side (the WC buffer will be flushed at any kick).
         */
        uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
                                      PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
                                      PVRSRV_MEMALLOCFLAG_GPU_READABLE |
                                      PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
                                      PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
                                      PVRSRV_MEMALLOCFLAG_CPU_READABLE |
                                      PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
                                      PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
                                      PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

        /* Allocate device memory for the firmware memory context for the new application. */
        PDUMPCOMMENT("Allocate RGX firmware memory context");
        /* FIXME: why cache-consistent? */
        eError = DevmemFwAllocate(psDevInfo,
                                  sizeof(*psFWMemContext),
                                  uiFWMemContextMemAllocFlags,
                                  "FirmwareMemoryContext",
                                  &psFWMemContextMemDesc);
        if (eError != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
                    eError));
            goto fail_alloc_fw_ctx;
        }

        /* Temporarily map the firmware memory context to the kernel. */
        eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
                                          (IMG_VOID **)&psFWMemContext);
        if (eError != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
                    eError));
            goto fail_acquire_cpu_addr;
        }

        /*
         * Write the new memory context's page catalogue into the firmware memory
         * context for the client.
         */
        eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
        if (eError != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
                    eError));
            DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
            goto fail_acquire_base_addr;
        }

        /*
         * Set default values for the rest of the structure.
         */
        psFWMemContext->uiPageCatBaseRegID = -1;
        psFWMemContext->uiBreakpointAddr = 0;
        psFWMemContext->uiBPHandlerAddr = 0;
        psFWMemContext->uiBreakpointCtl = 0;

#if defined(SUPPORT_GPUVIRT_VALIDATION)
        {
            IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;

            MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg);
            psFWMemContext->ui32OSid = ui32OSidReg;
        }
#endif

#if defined(PDUMP)
        {
            IMG_CHAR aszName[PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT];
            IMG_DEVMEM_OFFSET_T uiOffset = 0;

            /*
             * Dump the Mem context allocation
             */
            DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);

            /*
             * Obtain a symbolic addr of the mem context structure
             */
            eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
                                                   &uiOffset,
                                                   aszName,
                                                   PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT);
            if (eError != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
                        eError));
                DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
                goto fail_pdump_cat_base_addr;
            }

            /*
             * Dump the Page Cat tag in the mem context (symbolic address)
             */
            eError = MMU_PDumpWritePageCatBase(psMMUContext,
                                               aszName,
                                               uiOffset,
                                               8, /* 64-bit register write */
                                               0,
                                               0,
                                               0);
            if (eError != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "RGXRegisterMemoryContext: Failed to write Page Catalogue base (%u)",
                        eError));
                DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
                goto fail_pdump_cat_base;
            }
        }
#endif

        /*
         * Release kernel address acquired above.
         */
        DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);

        /*
         * Store the process information for this device memory context
         * for use with the host page-fault analysis.
         */
        psServerMMUContext->uiPID = OSGetCurrentProcessID();
        psServerMMUContext->psMMUContext = psMMUContext;
        psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
        if (OSSNPrintf(psServerMMUContext->szProcessName,
                       RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
                       "%s",
                       OSGetCurrentProcessName()) == RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
        {
            psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
        }

        OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
        dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
        OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);

        MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
        *hPrivData = psServerMMUContext;
    }

    return PVRSRV_OK;

#if defined(PDUMP)
fail_pdump_cat_base:
fail_pdump_cat_base_addr:
    MMU_ReleaseBaseAddr(IMG_NULL);
#endif
fail_acquire_base_addr:
    /* The CPU mapping was already released before jumping to this fail point */
fail_acquire_cpu_addr:
    DevmemFwFree(psFWMemContextMemDesc);
fail_alloc_fw_ctx:
    OSFreeMem(psServerMMUContext);
fail_alloc_server_ctx:
    PVR_ASSERT(eError != PVRSRV_OK);
    return eError;
}
PVRSRV_ERROR
PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
                                PVRSRV_DEVICE_NODE *psDevNode,
                                SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
                                IMG_UINT32 *puiSyncPrimVAddr,
                                IMG_UINT32 *puiSyncPrimBlockSize,
                                DEVMEM_EXPORTCOOKIE **psExportCookie)
{
    SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
    PVRSRV_ERROR eError;

    psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
    if (psNewSyncBlk == IMG_NULL)
    {
        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
        goto e0;
    }
    psNewSyncBlk->psDevNode = psDevNode;

    PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");

    eError = psDevNode->pfnAllocUFOBlock(psDevNode,
                                         &psNewSyncBlk->psMemDesc,
                                         puiSyncPrimVAddr,
                                         &psNewSyncBlk->ui32BlockSize);
    if (eError != PVRSRV_OK)
    {
        goto e1;
    }

    eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
                                      (IMG_PVOID *) &psNewSyncBlk->pui32LinAddr);
    if (eError != PVRSRV_OK)
    {
        goto e2;
    }

    eError = DevmemExport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
    if (eError != PVRSRV_OK)
    {
        goto e3;
    }

    eError = OSLockCreate(&psNewSyncBlk->hLock, LOCK_TYPE_NONE);
    if (eError != PVRSRV_OK)
    {
        goto e4;
    }

    psNewSyncBlk->ui32RefCount = 1;

    /* If there is a connection pointer then add the new block onto its list */
    _SyncConnectionAddBlock(psConnection, psNewSyncBlk);

    *psExportCookie = &psNewSyncBlk->sExportCookie;
    *ppsSyncBlk = psNewSyncBlk;
    *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;

    PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
                          "Allocated UFO block (FirmwareVAddr = 0x%08x)",
                          *puiSyncPrimVAddr);

    return PVRSRV_OK;

e4:
    DevmemUnexport(psNewSyncBlk->psMemDesc, &psNewSyncBlk->sExportCookie);
e3:
    DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
e2:
    psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
e1:
    OSFreeMem(psNewSyncBlk);
e0:
    return eError;
}
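/*
 * Hedged usage sketch (not part of the original source): the sync primitive
 * block is created with a reference count of 1, so a single
 * _SyncPrimitiveBlockUnref() releases it once no other holder has taken a
 * reference. The connection and device node are assumed to exist already.
 */
static PVRSRV_ERROR _ExampleSyncBlockLifetime(CONNECTION_DATA *psConnection,
                                              PVRSRV_DEVICE_NODE *psDevNode)
{
    SYNC_PRIMITIVE_BLOCK *psSyncBlk;
    IMG_UINT32 uiSyncPrimVAddr;
    IMG_UINT32 uiSyncPrimBlockSize;
    DEVMEM_EXPORTCOOKIE *psExportCookie;
    PVRSRV_ERROR eError;

    eError = PVRSRVAllocSyncPrimitiveBlockKM(psConnection,
                                             psDevNode,
                                             &psSyncBlk,
                                             &uiSyncPrimVAddr,
                                             &uiSyncPrimBlockSize,
                                             &psExportCookie);
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* ... use the UFO block ... */

    /* Drops the initial reference; frees the block when the count hits zero */
    _SyncPrimitiveBlockUnref(psSyncBlk);
    return PVRSRV_OK;
}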
/*******************************************************************************
 * TL Server public API implementation.
 ******************************************************************************/
PVRSRV_ERROR
TLStreamCreate(IMG_HANDLE *phStream,
               IMG_CHAR *szStreamName,
               IMG_UINT32 ui32Size,
               IMG_UINT32 ui32StreamFlags,
               TL_STREAM_SOURCECB pfProducerCB,
               IMG_PVOID pvProducerUD)
{
    PTL_STREAM psTmp;
    PVRSRV_ERROR eError;
    IMG_HANDLE hEventList;
    PTL_SNODE psn = 0;
    IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE+20];
    DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
                                PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
                                PVRSRV_MEMALLOCFLAG_GPU_READABLE |
                                PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
                                PVRSRV_MEMALLOCFLAG_UNCACHED | /* GPU & CPU */
                                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
                                PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;

    PVR_DPF_ENTERED;

    /* Sanity checks: a non-NULL handle pointer is required */
    if ( NULL == phStream )
    {
        PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
    }
    if (OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
    {
        PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
    }

    /* Check if there already exists a stream with this name. */
    psn = TLFindStreamNodeByName( szStreamName );
    if ( IMG_NULL != psn )
    {
        PVR_DPF_RETURN_RC(PVRSRV_ERROR_ALREADY_EXISTS);
    }

    /* Allocate stream structure container (stream struct) for the new stream */
    psTmp = OSAllocZMem(sizeof(TL_STREAM));
    if ( NULL == psTmp )
    {
        PVR_DPF_RETURN_RC(PVRSRV_ERROR_OUT_OF_MEMORY);
    }

    OSStringCopy(psTmp->szName, szStreamName);

    if ( ui32StreamFlags & TL_FLAG_FORCE_FLUSH )
    {
        psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
    }

    psTmp->bNoSignalOnCommit = (ui32StreamFlags & TL_FLAG_NO_SIGNAL_ON_COMMIT) ? IMG_TRUE : IMG_FALSE;

    if ( ui32StreamFlags & TL_FLAG_DROP_DATA )
    {
        if ( ui32StreamFlags & TL_FLAG_BLOCKING_RESERVE )
        {
            eError = PVRSRV_ERROR_INVALID_PARAMS;
            goto e0;
        }
        psTmp->bDrop = IMG_TRUE;
    }
    else if ( ui32StreamFlags & TL_FLAG_BLOCKING_RESERVE )
    {
        /* Additional synchronization object required for this kind of stream */
        psTmp->bBlock = IMG_TRUE;

        eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
        if (eError != PVRSRV_OK)
        {
            goto e0;
        }
        /* Create an event handle for this kind of stream */
        eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
        if (eError != PVRSRV_OK)
        {
            goto e1;
        }
    }

    /* Remember producer supplied CB and data for later */
    psTmp->pfProducerCallback = (IMG_VOID(*)(IMG_VOID))pfProducerCB;
    psTmp->pvProducerUserData = pvProducerUD;

    /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
    psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
    psTmp->ui32Read = 0;
    psTmp->ui32Write = 0;
    psTmp->ui32Pending = NOTHING_PENDING;

    OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", szStreamName);

    /* Allocate memory for the circular buffer and export it to user space. */
    eError = DevmemAllocateExportable( IMG_NULL,
                                       (IMG_HANDLE) TLGetGlobalRgxDevice(),
                                       (IMG_DEVMEM_SIZE_T) psTmp->ui32Size,
                                       (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
                                       uiMemFlags | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
                                       pszBufferLabel,
                                       &psTmp->psStreamMemDesc);
    PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e2);

    eError = DevmemAcquireCpuVirtAddr( psTmp->psStreamMemDesc, (IMG_VOID**) &psTmp->pbyBuffer );
    PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e3);

    eError = DevmemExport(psTmp->psStreamMemDesc, &(psTmp->sExportCookie));
    PVR_LOGG_IF_ERROR(eError, "DevmemExport", e4);

    /* Synchronization object to synchronize with user side data transfers. */
    eError = OSEventObjectCreate(psTmp->szName, &hEventList);
    if (eError != PVRSRV_OK)
    {
        goto e5;
    }

    /* Stream created, now reset the reference count to 1 */
    psTmp->uiRefCount = 1;

    //Thread Safety: Not yet implemented
    eError = OSLockCreate(&psTmp->hLock, LOCK_TYPE_PASSIVE);
    if (eError != PVRSRV_OK)
    {
        goto e6;
    }

    /* Now remember the stream in the global TL structures */
    psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, 0);
    if (psn == NULL)
    {
        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
        goto e7;
    }
    TLAddStreamNode(psn);

    /* Best effort signal; the client wait timeout will ultimately let it find
     * the new stream if this fails. Acceptable to avoid cleanup as it is
     * tricky at this point. */
    (void) OSEventObjectSignal(TLGGD()->hTLEventObj);

    /* Pass the newly created stream handle back to caller */
    *phStream = (IMG_HANDLE)psTmp;
    PVR_DPF_RETURN_OK;

e7:
    //Thread Safety: Not yet implemented
    OSLockDestroy(psTmp->hLock);
e6:
    OSEventObjectDestroy(hEventList);
e5:
    DevmemUnexport(psTmp->psStreamMemDesc, &(psTmp->sExportCookie));
e4:
    DevmemReleaseCpuVirtAddr( psTmp->psStreamMemDesc );
e3:
    DevmemFree(psTmp->psStreamMemDesc);
e2:
    OSEventObjectClose(psTmp->hProducerEvent);
e1:
    OSEventObjectDestroy(psTmp->hProducerEventObj);
e0:
    OSFREEMEM(psTmp);
    PVR_DPF_RETURN_RC(eError);
}
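/*
 * Hedged usage sketch (not part of the original source): creating a
 * force-flush stream, writing to it elsewhere, and closing it. The stream
 * name and size are illustrative; TLStreamClose() only tears the stream
 * down once the reference count drops to zero.
 */
static PVRSRV_ERROR _ExampleStreamLifetime(IMG_VOID)
{
    IMG_HANDLE hStream;
    PVRSRV_ERROR eError;

    eError = TLStreamCreate(&hStream,
                            "example_stream",   /* illustrative name */
                            4096,               /* illustrative buffer size */
                            TL_FLAG_FORCE_FLUSH,
                            NULL,               /* no producer callback */
                            NULL);              /* no producer user data */
    if (eError != PVRSRV_OK)
    {
        return eError;
    }

    /* ... commit data into the stream ... */

    TLStreamClose(hStream);
    return PVRSRV_OK;
}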
IMG_EXPORT
PVRSRV_ERROR PVRSRVRGXCreateTQ2DContextKM(PVRSRV_DEVICE_NODE *psDeviceNode,
                                          DEVMEM_MEMDESC *psTQ2DCCBMemDesc,
                                          DEVMEM_MEMDESC *psTQ2DCCBCtlMemDesc,
                                          RGX_TQ2D_CLEANUP_DATA **ppsCleanupData,
                                          DEVMEM_MEMDESC **ppsFWTQ2DContextMemDesc,
                                          IMG_UINT32 ui32Priority,
                                          IMG_UINT32 ui32FrameworkRegisterSize,
                                          IMG_PBYTE pbyFrameworkRegisters,
                                          IMG_HANDLE hMemCtxPrivData)
{
    PVRSRV_ERROR eError = PVRSRV_OK;
    PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
    RGXFWIF_FWCOMMONCONTEXT *psFWTQ2DContext;
    RGX_TQ2D_CLEANUP_DATA *psTmpCleanup;
    DEVMEM_MEMDESC *psFWFrameworkMemDesc;

    /* Prepare cleanup struct */
    psTmpCleanup = OSAllocMem(sizeof(*psTmpCleanup));
    if (psTmpCleanup == IMG_NULL)
    {
        return PVRSRV_ERROR_OUT_OF_MEMORY;
    }

    OSMemSet(psTmpCleanup, 0, sizeof(*psTmpCleanup));
    *ppsCleanupData = psTmpCleanup;

    /* Allocate cleanup sync */
    eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
                           &psTmpCleanup->psCleanupSync);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ2DContextKM: Failed to allocate cleanup sync (0x%x)",
                eError));
        goto fail_syncalloc;
    }

    /* Allocate device memory for the firmware TQ 2D context. */
    PDUMPCOMMENT("Allocate RGX firmware TQ 2D context");
    eError = DevmemFwAllocate(psDevInfo,
                              sizeof(*psFWTQ2DContext),
                              RGX_FWCOMCTX_ALLOCFLAGS,
                              ppsFWTQ2DContextMemDesc);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ2DContextKM: Failed to allocate firmware TQ 2D context (%u)",
                eError));
        goto fail_contextalloc;
    }
    psTmpCleanup->psFWTQ2DContextMemDesc = *ppsFWTQ2DContextMemDesc;
    psTmpCleanup->psDeviceNode = psDeviceNode;

    /* Temporarily map the firmware TQ 2D context to the kernel. */
    eError = DevmemAcquireCpuVirtAddr(*ppsFWTQ2DContextMemDesc,
                                      (IMG_VOID **)&psFWTQ2DContext);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ2DContextKM: Failed to map firmware TQ 2D context (%u)",
                eError));
        goto fail_cpuvirtacquire;
    }

    /*
     * Create the FW framework buffer
     */
    eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
                                        &psFWFrameworkMemDesc,
                                        ui32FrameworkRegisterSize);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ2DContextKM: Failed to allocate firmware GPU framework state (%u)",
                eError));
        goto fail_frameworkcreate;
    }

    psTmpCleanup->psFWFrameworkMemDesc = psFWFrameworkMemDesc;

    /* Copy the Framework client data into the framework buffer */
    eError = PVRSRVRGXFrameworkCopyRegisters(psFWFrameworkMemDesc,
                                             pbyFrameworkRegisters,
                                             ui32FrameworkRegisterSize);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ2DContextKM: Failed to populate the framework buffer (%u)",
                eError));
        goto fail_frameworkcopy;
    }

    eError = RGXInitFWCommonContext(psFWTQ2DContext,
                                    psTQ2DCCBMemDesc,
                                    psTQ2DCCBCtlMemDesc,
                                    hMemCtxPrivData,
                                    psFWFrameworkMemDesc,
                                    ui32Priority,
                                    IMG_NULL,
                                    &psTmpCleanup->sFWComContextCleanup);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ2DContextKM: Failed to init firmware common context (%u)",
                eError));
        goto fail_contextinit;
    }

    /*
     * Dump the TQ2D and the memory contexts
     */
    PDUMPCOMMENT("Dump FWTQ2DContext");
    DevmemPDumpLoadMem(*ppsFWTQ2DContextMemDesc, 0, sizeof(*psFWTQ2DContext), PDUMP_FLAGS_CONTINUOUS);

    /* Release address acquired above. */
    DevmemReleaseCpuVirtAddr(*ppsFWTQ2DContextMemDesc);

    return PVRSRV_OK;

fail_contextinit:
fail_frameworkcopy:
    DevmemFwFree(psFWFrameworkMemDesc);
fail_frameworkcreate:
    DevmemReleaseCpuVirtAddr(*ppsFWTQ2DContextMemDesc);
fail_cpuvirtacquire:
    DevmemFwFree(*ppsFWTQ2DContextMemDesc);
fail_contextalloc:
    SyncPrimFree(psTmpCleanup->psCleanupSync);
fail_syncalloc:
    OSFreeMem(psTmpCleanup);
    return eError;
}
/*
 * PVRSRVRGXDestroyTQ3DContextKM
 */
IMG_EXPORT
PVRSRV_ERROR PVRSRVRGXDestroyTQ3DContextKM(RGX_TQ3D_CLEANUP_DATA *psCleanupData)
{
    PVRSRV_ERROR eError = PVRSRV_OK;
    PRGXFWIF_FWCOMMONCONTEXT psFWComContextFWAddr;

    RGXSetFirmwareAddress(&psFWComContextFWAddr,
                          psCleanupData->psFWTQ3DContextMemDesc,
                          0,
                          RFW_FWADDR_NOREF_FLAG);

    eError = RGXFWRequestCommonContextCleanUp(psCleanupData->psDeviceNode,
                                              psFWComContextFWAddr,
                                              psCleanupData->psCleanupSync,
                                              RGXFWIF_DM_3D);

    /* If we get a retry error then we can't free this resource as it's still
     * in use and we will be called again */
    if (eError != PVRSRV_ERROR_RETRY)
    {
        eError = RGXDeinitFWCommonContext(&psCleanupData->sFWComContextCleanup);
        if (eError != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDestroyTQ3DContextKM : failed to deinit fw common ctx. Error:%u",
                    eError));
            goto e0;
        }

#if defined(DEBUG)
        /* Log the number of TQ3D context stores which occurred */
        {
            RGXFWIF_3DCTX_STATE *psFWState;

            eError = DevmemAcquireCpuVirtAddr(psCleanupData->psFWTQ3DContextStateMemDesc,
                                              (IMG_VOID **)&psFWState);
            if (eError != PVRSRV_OK)
            {
                PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDestroyTQ3DContextKM: Failed to map firmware render context state (%u)",
                        eError));
            }
            else
            {
                PVR_DPF((PVR_DBG_WARNING, "Number of context stores on FW TQ3D context 0x%010x: %u",
                        psFWComContextFWAddr.ui32Addr,
                        psFWState->ui32NumStores));

                /* Release the CPU virt addr */
                DevmemReleaseCpuVirtAddr(psCleanupData->psFWTQ3DContextStateMemDesc);
            }
        }
#endif

        /*
         * Unmap the TA/3D context state buffer pointers
         */
        RGXUnsetFirmwareAddress(psCleanupData->psFWTQ3DContextStateMemDesc);

        /*
         * Free the firmware TQ/3D context state buffer
         */
        DevmemFwFree(psCleanupData->psFWTQ3DContextStateMemDesc);

        /* Free the framework buffer */
        DevmemFwFree(psCleanupData->psFWFrameworkMemDesc);

        /*
         * Free the firmware common context.
         */
        DevmemFwFree(psCleanupData->psFWTQ3DContextMemDesc);

        /* Free the cleanup sync */
        SyncPrimFree(psCleanupData->psCleanupSync);

        OSFreeMem(psCleanupData);
    }

e0:
    return eError;
}
/*
 * PVRSRVRGXCreateTQ3DContextKM
 */
IMG_EXPORT
PVRSRV_ERROR PVRSRVRGXCreateTQ3DContextKM(PVRSRV_DEVICE_NODE *psDeviceNode,
                                          DEVMEM_MEMDESC *psTQ3DCCBMemDesc,
                                          DEVMEM_MEMDESC *psTQ3DCCBCtlMemDesc,
                                          RGX_TQ3D_CLEANUP_DATA **ppsCleanupData,
                                          DEVMEM_MEMDESC **ppsFWTQ3DContextMemDesc,
                                          DEVMEM_MEMDESC **ppsFWTQ3DContextStateMemDesc,
                                          IMG_UINT32 ui32Priority,
                                          IMG_DEV_VIRTADDR sMCUFenceAddr,
                                          IMG_UINT32 ui32FrameworkRegisterSize,
                                          IMG_PBYTE pbyFrameworkRegisters,
                                          IMG_HANDLE hMemCtxPrivData)
{
    PVRSRV_ERROR eError = PVRSRV_OK;
    PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
    RGXFWIF_FWCOMMONCONTEXT *psFWTQ3DContext;
    RGX_TQ3D_CLEANUP_DATA *psTmpCleanup;
    DEVMEM_MEMDESC *psFWFrameworkMemDesc;

    /* Prepare cleanup struct */
    psTmpCleanup = OSAllocMem(sizeof(*psTmpCleanup));
    if (psTmpCleanup == IMG_NULL)
    {
        return PVRSRV_ERROR_OUT_OF_MEMORY;
    }

    OSMemSet(psTmpCleanup, 0, sizeof(*psTmpCleanup));
    *ppsCleanupData = psTmpCleanup;

    /* Allocate cleanup sync */
    eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
                           &psTmpCleanup->psCleanupSync);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to allocate cleanup sync (0x%x)",
                eError));
        goto fail_syncalloc;
    }

    /* Allocate device memory for the firmware TQ 3D context. */
    PDUMPCOMMENT("Allocate RGX firmware TQ 3D context");
    eError = DevmemFwAllocate(psDevInfo,
                              sizeof(*psFWTQ3DContext),
                              RGX_FWCOMCTX_ALLOCFLAGS,
                              ppsFWTQ3DContextMemDesc);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to allocate firmware TQ 3D context (%u)",
                eError));
        goto fail_contextalloc;
    }
    psTmpCleanup->psFWTQ3DContextMemDesc = *ppsFWTQ3DContextMemDesc;
    psTmpCleanup->psDeviceNode = psDeviceNode;

    /* Temporarily map the firmware TQ 3D context to the kernel. */
    eError = DevmemAcquireCpuVirtAddr(*ppsFWTQ3DContextMemDesc,
                                      (IMG_VOID **)&psFWTQ3DContext);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to map firmware TQ 3D context (%u)",
                eError));
        goto fail_cpuvirtacquire;
    }

    /* Allocate device memory for the firmware GPU context suspend state.
     * Note: the FW reads/writes the state to memory by accessing the GPU
     * register interface.
     */
    PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");
    eError = DevmemFwAllocate(psDevInfo,
                              sizeof(RGXFWIF_3DCTX_STATE),
                              RGX_FWCOMCTX_ALLOCFLAGS,
                              ppsFWTQ3DContextStateMemDesc);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to allocate firmware GPU context suspend state (%u)",
                eError));
        goto fail_contextsuspendalloc;
    }
    psTmpCleanup->psFWTQ3DContextStateMemDesc = *ppsFWTQ3DContextStateMemDesc;

    /*
     * Create the FW framework buffer
     */
    eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
                                        &psFWFrameworkMemDesc,
                                        ui32FrameworkRegisterSize);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to allocate firmware GPU framework state (%u)",
                eError));
        goto fail_frameworkcreate;
    }

    psTmpCleanup->psFWFrameworkMemDesc = psFWFrameworkMemDesc;

    /* Copy the Framework client data into the framework buffer */
    eError = PVRSRVRGXFrameworkCopyRegisters(psFWFrameworkMemDesc,
                                             pbyFrameworkRegisters,
                                             ui32FrameworkRegisterSize);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to populate the framework buffer (%u)",
                eError));
        goto fail_frameworkcopy;
    }

    /* Init TQ/3D FW common context */
    eError = RGXInitFWCommonContext(psFWTQ3DContext,
                                    psTQ3DCCBMemDesc,
                                    psTQ3DCCBCtlMemDesc,
                                    hMemCtxPrivData,
                                    psFWFrameworkMemDesc,
                                    ui32Priority,
                                    &sMCUFenceAddr,
                                    &psTmpCleanup->sFWComContextCleanup);
    if (eError != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCreateTQ3DContextKM: Failed to init firmware common context (%u)",
                eError));
        goto fail_contextinit;
    }

    /*
     * Set the firmware GPU context state buffer.
     *
     * The common context stores a dword pointer (FW) so we can cast the generic
     * buffer to the correct 3D (3D/TQ = normal 3D) state structure type in the FW.
     */
    RGXSetFirmwareAddress(&psFWTQ3DContext->psContextState,
                          *ppsFWTQ3DContextStateMemDesc,
                          0,
                          RFW_FWADDR_FLAG_NONE);

    /*
     * Dump the TQ3D and the memory contexts
     */
    PDUMPCOMMENT("Dump FWTQ3DContext");
    DevmemPDumpLoadMem(*ppsFWTQ3DContextMemDesc, 0, sizeof(*psFWTQ3DContext), PDUMP_FLAGS_CONTINUOUS);

    /*
     * Dump the FW TQ/3D context suspend state buffer
     */
    PDUMPCOMMENT("Dump FWTQ3DContextState");
    DevmemPDumpLoadMem(*ppsFWTQ3DContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);

    /* Release address acquired above. */
    DevmemReleaseCpuVirtAddr(*ppsFWTQ3DContextMemDesc);

    return PVRSRV_OK;

fail_contextinit:
fail_frameworkcopy:
    DevmemFwFree(psFWFrameworkMemDesc);
fail_frameworkcreate:
    DevmemFwFree(*ppsFWTQ3DContextStateMemDesc);
fail_contextsuspendalloc:
    DevmemReleaseCpuVirtAddr(*ppsFWTQ3DContextMemDesc);
fail_cpuvirtacquire:
    DevmemFwFree(*ppsFWTQ3DContextMemDesc);
fail_contextalloc:
    SyncPrimFree(psTmpCleanup->psCleanupSync);
fail_syncalloc:
    OSFreeMem(psTmpCleanup);
    return eError;
}