static void BM_FreeMemory(void *h, u32 _base, struct BM_MAPPING *psMapping)
{
    struct BM_HEAP *pBMHeap = h;
    size_t uPSize;

    PVR_UNREFERENCED_PARAMETER(_base);

    PVR_DPF(PVR_DBG_MESSAGE,
        "BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
        h, _base, psMapping);

    PVR_ASSERT(psMapping != NULL);

    if (psMapping == NULL) {
        PVR_DPF(PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter");
        return;
    }

    DevMemoryFree(psMapping);

    /* Interleaved allocations record twice the physical size; halve it
     * before sizing the backing-store free. */
    if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
        psMapping->uSize /= 2;

    if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
        uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
    else
        uPSize = psMapping->uSize;

    if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
        OSFreePages(pBMHeap->ui32Attribs, uPSize,
                (void *)psMapping->CpuVAddr,
                psMapping->hOSMemHandle);
    } else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
        struct IMG_SYS_PHYADDR sSysPAddr;

        OSUnReservePhys(psMapping->CpuVAddr, uPSize,
                pBMHeap->ui32Attribs, psMapping->hOSMemHandle);
        sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
        RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
    } else {
        PVR_DPF(PVR_DBG_ERROR,
            "BM_FreeMemory: Invalid backing store type");
    }

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
          psMapping, NULL);

    PVR_DPF(PVR_DBG_MESSAGE,
        "..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)",
        h, _base, psMapping);
}
/* Unmap an import from the Device */
IMG_INTERNAL
IMG_VOID _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
{
    PVRSRV_ERROR eError;
    DEVMEM_DEVICE_IMPORT *psDeviceImport;

    psDeviceImport = &psImport->sDeviceImport;

    OSLockAcquire(psDeviceImport->hLock);
    DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
                          __FUNCTION__,
                          psImport,
                          psDeviceImport->ui32RefCount,
                          psDeviceImport->ui32RefCount-1);

    if (--psDeviceImport->ui32RefCount == 0)
    {
        DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;

        if (psDeviceImport->bMapped)
        {
            eError = BridgeDevmemIntUnmapPMR(psImport->hBridge,
                                             psDeviceImport->hMapping);
            PVR_ASSERT(eError == PVRSRV_OK);
        }

        eError = BridgeDevmemIntUnreserveRange(psImport->hBridge,
                                               psDeviceImport->hReservation);
        PVR_ASSERT(eError == PVRSRV_OK);

        RA_Free(psHeap->psQuantizedVMRA,
                psDeviceImport->sDevVAddr.uiAddr);

        OSLockRelease(psDeviceImport->hLock);

        _DevmemImportStructRelease(psImport);

        OSLockAcquire(psHeap->hLock);
        psHeap->uiImportCount--;
        OSLockRelease(psHeap->hLock);
    }
    else
    {
        OSLockRelease(psDeviceImport->hLock);
    }
}
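/*
 * Illustrative sketch (not driver code): the drop-to-zero pattern used by
 * _DevmemImportStructDevUnmap above. The object's lock protects the count;
 * whichever caller takes the count to zero performs the teardown. All names
 * here (ref_t, ref_put, example_destroy) are hypothetical. Compile with
 * -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ref {
    pthread_mutex_t lock;
    unsigned count;
    void (*destroy)(struct ref *);  /* teardown run when count hits 0 */
} ref_t;

static void ref_put(ref_t *r)
{
    pthread_mutex_lock(&r->lock);
    if (--r->count == 0) {
        /* Drop the lock before teardown, as the driver does, so destroy()
         * may take other locks without nesting under this one. Safe
         * because nobody else holds a reference any more. */
        pthread_mutex_unlock(&r->lock);
        r->destroy(r);
        return;
    }
    pthread_mutex_unlock(&r->lock);
}

static void example_destroy(ref_t *r)
{
    printf("last reference dropped, freeing\n");
    pthread_mutex_destroy(&r->lock);
    free(r);
}

int main(void)
{
    ref_t *r = malloc(sizeof(*r));

    pthread_mutex_init(&r->lock, NULL);
    r->count = 2;
    r->destroy = example_destroy;

    ref_put(r);  /* 2 -> 1: no teardown */
    ref_put(r);  /* 1 -> 0: teardown runs */
    return 0;
}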
IMG_INTERNAL
IMG_VOID _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
{
    PVR_ASSERT(psMemDesc != NULL);
    PVR_ASSERT(psMemDesc->ui32RefCount != 0);

    OSLockAcquire(psMemDesc->hLock);
    DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
                          __FUNCTION__,
                          psMemDesc,
                          psMemDesc->ui32RefCount,
                          psMemDesc->ui32RefCount-1);

    if (--psMemDesc->ui32RefCount == 0)
    {
        OSLockRelease(psMemDesc->hLock);

        if (!psMemDesc->psImport->bExportable)
        {
            RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
                    psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
                    psMemDesc->uiOffset);
        }
        else
        {
            _DevmemImportStructRelease(psMemDesc->psImport);
        }

        OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
        OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
        OSLockDestroy(psMemDesc->hLock);
        OSFreeMem(psMemDesc);
    }
    else
    {
        OSLockRelease(psMemDesc->hLock);
    }
}
/* Map an import to the device */
IMG_INTERNAL
PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
                                       IMG_BOOL bMap,
                                       DEVMEM_IMPORT *psImport)
{
    DEVMEM_DEVICE_IMPORT *psDeviceImport;
    IMG_BOOL bStatus;
    RA_BASE_T uiAllocatedAddr;
    RA_LENGTH_T uiAllocatedSize;
    IMG_DEV_VIRTADDR sBase;
    IMG_HANDLE hReservation;
    PVRSRV_ERROR eError;

    psDeviceImport = &psImport->sDeviceImport;

    OSLockAcquire(psDeviceImport->hLock);
    DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
                          __FUNCTION__,
                          psImport,
                          psDeviceImport->ui32RefCount,
                          psDeviceImport->ui32RefCount+1);

    if (psDeviceImport->ui32RefCount++ == 0)
    {
        _DevmemImportStructAcquire(psImport);

        OSLockAcquire(psHeap->hLock);
        psHeap->uiImportCount++;
        OSLockRelease(psHeap->hLock);

        if (psHeap->psCtx->hBridge != psImport->hBridge)
        {
            /*
             * The import was done with a different connection than the
             * memory context, which means they are not compatible.
             */
            eError = PVRSRV_ERROR_INVALID_PARAMS;
            goto failCheck;
        }

        /* Allocate space in the VM */
        bStatus = RA_Alloc(psHeap->psQuantizedVMRA,
                           psImport->uiSize,
                           0,            /* flags: this RA doesn't use flags */
                           psImport->uiAlign,
                           &uiAllocatedAddr,
                           &uiAllocatedSize,
                           IMG_NULL);    /* don't care about per-import priv data */
        if (!bStatus)
        {
            eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
            goto failVMRAAlloc;
        }

        /* No reason for the allocated virtual size to be different from
           the PMR's size */
        PVR_ASSERT(uiAllocatedSize == psImport->uiSize);

        sBase.uiAddr = uiAllocatedAddr;

        /* Set up page tables for the allocated VM space */
        eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hBridge,
                                             psHeap->hDevMemServerHeap,
                                             sBase,
                                             uiAllocatedSize,
                                             &hReservation);
        if (eError != PVRSRV_OK)
        {
            goto failReserve;
        }

        if (bMap)
        {
            DEVMEM_FLAGS_T uiMapFlags;

            uiMapFlags = psImport->uiFlags &
                         PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;

            /* Actually map the PMR to the allocated VM space */
            eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hBridge,
                                           psHeap->hDevMemServerHeap,
                                           hReservation,
                                           psImport->hPMR,
                                           uiMapFlags,
                                           &psDeviceImport->hMapping);
            if (eError != PVRSRV_OK)
            {
                goto failMap;
            }
            psDeviceImport->bMapped = IMG_TRUE;
        }

        /* Set up the device-mapping-specific parts of the mapping info */
        psDeviceImport->hReservation = hReservation;
        psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
        psDeviceImport->psHeap = psHeap;
    }
    else
    {
        /* Check that we've been asked to map it into the same heap the
           second time around */
        if (psHeap != psDeviceImport->psHeap)
        {
            eError = PVRSRV_ERROR_INVALID_HEAP;
            goto failParams;
        }
    }
    OSLockRelease(psDeviceImport->hLock);

    return PVRSRV_OK;

failMap:
    BridgeDevmemIntUnreserveRange(psHeap->psCtx->hBridge,
                                  hReservation);
failReserve:
    RA_Free(psHeap->psQuantizedVMRA,
            uiAllocatedAddr);
failVMRAAlloc:
failCheck:
    _DevmemImportStructRelease(psImport);
    OSLockAcquire(psHeap->hLock);
    psHeap->uiImportCount--;
    OSLockRelease(psHeap->hLock);
failParams:
    OSLockRelease(psDeviceImport->hLock);
    PVR_ASSERT(eError != PVRSRV_OK);
    return eError;
}
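/*
 * Illustrative sketch (not driver code): the goto-unwind ladder that
 * _DevmemImportStructDevMap above uses for error handling. Each acquisition
 * stage gets a label; a failure at stage N jumps to the label that releases
 * stages N-1..1 in reverse order, so no stage is ever released twice or
 * leaked. All names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
    void *vm = NULL, *pagetables = NULL, *mapping = NULL;
    int err = 0;

    vm = malloc(64);            /* stage 1: reserve VM space  */
    if (!vm) {
        err = -1;
        goto fail_vm;
    }

    pagetables = malloc(64);    /* stage 2: reserve the range */
    if (!pagetables) {
        err = -2;
        goto fail_reserve;
    }

    mapping = malloc(64);       /* stage 3: map the memory    */
    if (!mapping) {
        err = -3;
        goto fail_map;
    }

    printf("all stages acquired\n");
    free(mapping);
    free(pagetables);
    free(vm);
    return 0;

fail_map:                       /* undo stage 2 */
    free(pagetables);
fail_reserve:                   /* undo stage 1 */
    free(vm);
fail_vm:
    return err;
}

int main(void)
{
    return setup();
}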
static IMG_VOID FreeBuf(BM_BUF *pBuf, IMG_UINT32 ui32Flags,
                        IMG_BOOL bFromAllocator)
{
    BM_MAPPING *pMapping;

    PVR_DPF((PVR_DBG_MESSAGE,
             "FreeBuf: pBuf=0x%x: DevVAddr=%08X CpuVAddr=0x%x CpuPAddr=%08X",
             (IMG_UINTPTR_T)pBuf, pBuf->DevVAddr.uiAddr,
             (IMG_UINTPTR_T)pBuf->CpuVAddr, pBuf->CpuPAddr.uiAddr));

    pMapping = pBuf->pMapping;

    if (ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
    {
        if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
        {
            if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
            {
                PVR_DPF((PVR_DBG_ERROR,
                         "FreeBuf: combination of DevVAddr management and RAM backing mode unsupported"));
            }
            else
            {
                OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING),
                          pMapping, IMG_NULL);
                pBuf->pMapping = IMG_NULL;
            }
        }
    }
    else
    {
        if (pBuf->hOSMemHandle != pMapping->hOSMemHandle)
        {
            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
            }
        }

        if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
        {
            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                PVR_ASSERT(pBuf->ui32ExportCount == 0);
                RA_Free(pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr,
                        IMG_FALSE);
            }
        }
        else
        {
            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                switch (pMapping->eCpuMemoryOrigin)
                {
                case hm_wrapped:
                    OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize,
                                    ui32Flags, pMapping->hOSMemHandle);
                    break;
                case hm_wrapped_virtaddr:
                    OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize,
                                    ui32Flags, pMapping->hOSMemHandle);
                    break;
                case hm_wrapped_scatter:
                    OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
                                             pMapping->uSize, ui32Flags,
                                             pMapping->hOSMemHandle);
                    break;
                case hm_wrapped_scatter_virtaddr:
                    OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
                                             pMapping->uSize, ui32Flags,
                                             pMapping->hOSMemHandle);
                    break;
                default:
                    break;
                }
            }

            if (bFromAllocator)
                DevMemoryFree(pMapping);

            if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
            {
                OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING),
                          pMapping, IMG_NULL);
                pBuf->pMapping = IMG_NULL;
            }
        }
    }

    if ((pBuf->ui32ExportCount == 0) && (pBuf->ui32RefCount == 0))
    {
        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_BUF), pBuf, IMG_NULL);
    }
}
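/*
 * Illustrative sketch (not driver code): FreeBuf above only releases the
 * underlying storage once both the reference count and the export count
 * have reached zero, so a buffer that is still exported survives its local
 * release. Hypothetical names throughout.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
    unsigned refcount;  /* local users */
    unsigned exports;   /* handles handed to other contexts */
    void *storage;
};

static void buf_try_free(struct buf *b)
{
    /* Storage may go only when nobody, local or remote, can reach it. */
    if (b->refcount == 0 && b->exports == 0) {
        printf("freeing storage\n");
        free(b->storage);
        free(b);
    }
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));

    b->storage = malloc(4096);
    b->refcount = 1;
    b->exports = 1;

    b->refcount--;
    buf_try_free(b);   /* export still live: nothing happens */

    b->exports--;
    buf_try_free(b);   /* both zero: storage released */
    return 0;
}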
static IMG_VOID BM_FreeMemory(IMG_VOID *h, IMG_UINTPTR_T _base,
                              BM_MAPPING *psMapping)
{
    BM_HEAP *pBMHeap = h;
    IMG_SIZE_T uPSize;

    PVR_UNREFERENCED_PARAMETER(_base);

    PVR_DPF((PVR_DBG_MESSAGE,
             "BM_FreeMemory (h=0x%x, base=0x%x, psMapping=0x%x)",
             (IMG_UINTPTR_T)h, _base, (IMG_UINTPTR_T)psMapping));

    PVR_ASSERT(psMapping != IMG_NULL);

    if (psMapping == IMG_NULL)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
        return;
    }

    DevMemoryFree(psMapping);

    if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
    {
        psMapping->uSize /= 2;
    }

    if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
    {
        uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
    }
    else
    {
        uPSize = psMapping->uSize;
    }

    if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
    {
        OSFreePages(pBMHeap->ui32Attribs,
                    uPSize,
                    (IMG_VOID *)psMapping->CpuVAddr,
                    psMapping->hOSMemHandle);
    }
    else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
    {
        IMG_SYS_PHYADDR sSysPAddr;

        OSUnReservePhys(psMapping->CpuVAddr, uPSize, pBMHeap->ui32Attribs,
                        psMapping->hOSMemHandle);
        sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
        RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
    }
    else
    {
        PVR_DPF((PVR_DBG_ERROR,
                 "BM_FreeMemory: Invalid backing store type"));
    }

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping,
              IMG_NULL);

    PVR_DPF((PVR_DBG_MESSAGE,
             "..BM_FreeMemory (h=0x%x, base=0x%x)",
             (IMG_UINTPTR_T)h, _base));
}
static IMG_BOOL BM_ImportMemory(IMG_VOID *pH,
                                IMG_SIZE_T uRequestSize,
                                IMG_SIZE_T *pActualSize,
                                BM_MAPPING **ppsMapping,
                                IMG_UINT32 uFlags,
                                IMG_UINTPTR_T *pBase)
{
    BM_MAPPING *pMapping;
    BM_HEAP *pBMHeap = pH;
    BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
    IMG_BOOL bResult;
    IMG_SIZE_T uSize;
    IMG_SIZE_T uPSize;
    IMG_UINT32 uDevVAddrAlignment = 0;

    PVR_DPF((PVR_DBG_MESSAGE,
             "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
             (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags,
             uDevVAddrAlignment));

    PVR_ASSERT(ppsMapping != IMG_NULL);
    PVR_ASSERT(pBMContext != IMG_NULL);

    if (ppsMapping == IMG_NULL)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
        goto fail_exit;
    }

    uSize = HOST_PAGEALIGN(uRequestSize);
    PVR_ASSERT(uSize >= uRequestSize);

    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
                   sizeof(BM_MAPPING),
                   (IMG_PVOID *)&pMapping, IMG_NULL,
                   "Buffer Manager Mapping") != PVRSRV_OK)
    {
        PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
        goto fail_exit;
    }

    pMapping->hOSMemHandle = 0;
    pMapping->CpuVAddr = 0;
    pMapping->DevVAddr.uiAddr = 0;
    pMapping->CpuPAddr.uiAddr = 0;
    pMapping->uSize = uSize;
    pMapping->pBMHeap = pBMHeap;
    pMapping->ui32Flags = uFlags;

    if (pActualSize)
    {
        *pActualSize = uSize;
    }

    if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
    {
        uPSize = pBMHeap->sDevArena.ui32DataPageSize;
    }
    else
    {
        uPSize = pMapping->uSize;
    }

    if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
    {
        IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

        if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
        {
            ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
            ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
        }

        if (OSAllocPages(ui32Attribs,
                         uPSize,
                         pBMHeap->sDevArena.ui32DataPageSize,
                         (IMG_VOID **)&pMapping->CpuVAddr,
                         &pMapping->hOSMemHandle) != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR,
                     "BM_ImportMemory: OSAllocPages(0x%x) failed", uPSize));
            goto fail_mapping_alloc;
        }

        pMapping->eCpuMemoryOrigin = hm_env;
    }
    else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
    {
        IMG_SYS_PHYADDR sSysPAddr;
        IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

        PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);

        if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
        {
            ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
            ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
        }

        if (!RA_Alloc(pBMHeap->pLocalDevMemArena,
                      uPSize, IMG_NULL, IMG_NULL, 0,
                      pBMHeap->sDevArena.ui32DataPageSize,
                      0, (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
        {
            PVR_DPF((PVR_DBG_ERROR,
                     "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
            goto fail_mapping_alloc;
        }

        pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
        if (OSReservePhys(pMapping->CpuPAddr,
                          uPSize,
                          ui32Attribs,
                          &pMapping->CpuVAddr,
                          &pMapping->hOSMemHandle) != PVRSRV_OK)
        {
            PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
            goto fail_dev_mem_alloc;
        }

        pMapping->eCpuMemoryOrigin = hm_contiguous;
    }
    else
    {
        PVR_DPF((PVR_DBG_ERROR,
                 "BM_ImportMemory: Invalid backing store type"));
        goto fail_mapping_alloc;
    }

    bResult = DevMemoryAlloc(pBMContext,
                             pMapping,
                             IMG_NULL,
                             uFlags,
                             uDevVAddrAlignment,
                             &pMapping->DevVAddr);
    if (!bResult)
    {
        PVR_DPF((PVR_DBG_ERROR,
                 "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
                 pMapping->uSize));
        goto fail_dev_mem_alloc;
    }

    PVR_ASSERT(uDevVAddrAlignment > 1 ?
               (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);

    *pBase = pMapping->DevVAddr.uiAddr;
    *ppsMapping = pMapping;

    PVR_DPF((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
    return IMG_TRUE;

fail_dev_mem_alloc:
    if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
    {
        if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
        {
            pMapping->uSize /= 2;
        }

        if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
        {
            uPSize = pBMHeap->sDevArena.ui32DataPageSize;
        }
        else
        {
            uPSize = pMapping->uSize;
        }

        if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
        {
            OSFreePages(pBMHeap->ui32Attribs,
                        uPSize,
                        (IMG_VOID *)pMapping->CpuVAddr,
                        pMapping->hOSMemHandle);
        }
        else
        {
            IMG_SYS_PHYADDR sSysPAddr;

            if (pMapping->CpuVAddr)
            {
                OSUnReservePhys(pMapping->CpuVAddr,
                                uPSize,
                                pBMHeap->ui32Attribs,
                                pMapping->hOSMemHandle);
            }
            sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
            RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
        }
    }
fail_mapping_alloc:
    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping,
              IMG_NULL);
fail_exit:
    return IMG_FALSE;
}
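/*
 * Illustrative sketch (not driver code): BM_ImportMemory/BM_FreeMemory
 * above act as the grow/shrink callbacks of a resource arena. When the
 * arena cannot satisfy an allocation it calls the import callback to
 * obtain a fresh span of backing store, and hands spans back through the
 * free callback. The arena here is reduced to a single cached span; all
 * names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

typedef int (*import_fn)(size_t size, void **span);
typedef void (*free_fn)(void *span);

struct arena {
    import_fn import;
    free_fn release;
    void *span;        /* the one span this toy arena caches */
    size_t span_size;
};

static int arena_alloc(struct arena *a, size_t size, void **out)
{
    if (!a->span) {    /* nothing cached: ask the backing store */
        if (a->import(size, &a->span) != 0)
            return -1;
        a->span_size = size;
    }
    if (size > a->span_size)
        return -1;
    *out = a->span;
    return 0;
}

static void arena_destroy(struct arena *a)
{
    if (a->span)
        a->release(a->span);   /* hand the span back */
}

/* Backing-store callbacks standing in for BM_ImportMemory/BM_FreeMemory. */
static int demo_import(size_t size, void **span)
{
    *span = malloc(size);
    return *span ? 0 : -1;
}

static void demo_free(void *span)
{
    free(span);
}

int main(void)
{
    struct arena a = { demo_import, demo_free, NULL, 0 };
    void *p;

    if (arena_alloc(&a, 4096, &p) == 0)
        printf("allocated span at %p\n", p);
    arena_destroy(&a);
    return 0;
}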
void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
{
    u32 *pui32Tmp, i;
    struct SYS_DATA *psSysData;
    struct MMU_CONTEXT **ppsMMUContext;

    if (SysAcquireData(&psSysData) != PVRSRV_OK) {
        PVR_DPF(PVR_DBG_ERROR,
            "MMU_Finalise: ERROR call to SysAcquireData failed");
        return;
    }

    PDUMPCOMMENT("Free page directory");
    PDUMPFREEPAGETABLE(psMMUContext->pvPDCpuVAddr);

    /* Clear the page directory before releasing it. */
    pui32Tmp = (u32 *)psMMUContext->pvPDCpuVAddr;
    for (i = 0; i < SGX_MMU_PD_SIZE; i++)
        pui32Tmp[i] = 0;

    if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
        OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                SGX_MMU_PAGE_SIZE, psMMUContext->pvPDCpuVAddr,
                psMMUContext->hPDOSMemHandle);
    } else {
        struct IMG_SYS_PHYADDR sSysPAddr;
        struct IMG_CPU_PHYADDR sCpuPAddr;

        sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
        sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

        OSUnMapPhysToLin((void __iomem __force *)
                    psMMUContext->pvPDCpuVAddr,
                 SGX_MMU_PAGE_SIZE,
                 PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                 psMMUContext->hPDOSMemHandle);

        RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
            sSysPAddr.uiAddr, IMG_FALSE);
    }

    PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");

    /* Unlink this context from the device's MMU context list; walking via
     * a pointer-to-pointer removes the head node with no special case. */
    ppsMMUContext = (struct MMU_CONTEXT **)
                &psMMUContext->psDevInfo->pvMMUContextList;
    while (*ppsMMUContext) {
        if (*ppsMMUContext == psMMUContext) {
            *ppsMMUContext = psMMUContext->psNext;
            break;
        }
        ppsMMUContext = &((*ppsMMUContext)->psNext);
    }

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
          psMMUContext, NULL);
}
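/*
 * Illustrative sketch (not driver code): the unlink loop at the end of
 * MMU_Finalise above walks the list through a pointer-to-pointer, which
 * removes head and interior nodes with the same code path and no
 * "previous node" bookkeeping. Hypothetical names throughout.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
    int id;
    struct node *next;
};

static void unlink_node(struct node **head, struct node *victim)
{
    struct node **pp = head;

    while (*pp) {
        if (*pp == victim) {
            *pp = victim->next;    /* splice it out */
            break;
        }
        pp = &(*pp)->next;
    }
}

int main(void)
{
    struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
    struct node *head = &a, *n;

    a.next = &b;
    b.next = &c;

    unlink_node(&head, &b);    /* interior node */
    unlink_node(&head, &a);    /* head node: same code path */

    for (n = head; n; n = n->next)
        printf("node %d\n", n->id);    /* prints: node 3 */
    return 0;
}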
enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
                 struct MMU_CONTEXT **ppsMMUContext,
                 struct IMG_DEV_PHYADDR *psPDDevPAddr)
{
    u32 *pui32Tmp;
    u32 i;
    void *pvPDCpuVAddr;
    struct IMG_DEV_PHYADDR sPDDevPAddr;
    struct IMG_CPU_PHYADDR sCpuPAddr;
    struct IMG_SYS_PHYADDR sSysPAddr;
    struct MMU_CONTEXT *psMMUContext;
    void *hPDOSMemHandle;
    struct SYS_DATA *psSysData;
    struct PVRSRV_SGXDEV_INFO *psDevInfo;

    PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");

    if (SysAcquireData(&psSysData) != PVRSRV_OK) {
        PVR_DPF(PVR_DBG_ERROR,
            "MMU_Initialise: ERROR call to SysAcquireData failed");
        return PVRSRV_ERROR_GENERIC;
    }

    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
               (void **)&psMMUContext, NULL) != PVRSRV_OK) {
        PVR_DPF(PVR_DBG_ERROR,
            "MMU_Initialise: ERROR call to OSAllocMem failed");
        return PVRSRV_ERROR_GENERIC;
    }
    OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));

    psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
    psMMUContext->psDevInfo = psDevInfo;
    psMMUContext->psDeviceNode = psDeviceNode;

    if (psDeviceNode->psLocalDevMemArena == NULL) {
        if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                 SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE,
                 &pvPDCpuVAddr, &hPDOSMemHandle) != PVRSRV_OK) {
            PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
                "ERROR call to OSAllocPages failed");
            goto err1;
        }

        if (pvPDCpuVAddr)
            sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
        else
            sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
        sPDDevPAddr =
            SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
    } else {
        if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
                 SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
                 &(sSysPAddr.uiAddr)) != IMG_TRUE) {
            PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
                "ERROR call to RA_Alloc failed");
            goto err1;
        }

        sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
        sPDDevPAddr =
            SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
        pvPDCpuVAddr = (void __force *)
            OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
                   PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                   &hPDOSMemHandle);
        if (!pvPDCpuVAddr) {
            PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
                "ERROR failed to map page tables");
            goto err2;
        }
    }

    PDUMPCOMMENT("Alloc page directory");
    PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);

    if (pvPDCpuVAddr) {
        pui32Tmp = (u32 *)pvPDCpuVAddr;
    } else {
        PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: pvPDCpuVAddr invalid");
        goto err3;
    }

    for (i = 0; i < SGX_MMU_PD_SIZE; i++)
        pui32Tmp[i] = 0;

    PDUMPCOMMENT("Page directory contents");
    PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
               PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);

    psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
    psMMUContext->sPDDevPAddr = sPDDevPAddr;
    psMMUContext->hPDOSMemHandle = hPDOSMemHandle;

    *ppsMMUContext = psMMUContext;
    *psPDDevPAddr = sPDDevPAddr;

    psMMUContext->psNext =
        (struct MMU_CONTEXT *)psDevInfo->pvMMUContextList;
    psDevInfo->pvMMUContextList = (void *)psMMUContext;

    return PVRSRV_OK;

err3:
    if (psDeviceNode->psLocalDevMemArena)
        OSUnMapPhysToLin((void __iomem __force *)pvPDCpuVAddr,
                 SGX_MMU_PAGE_SIZE,
                 PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                 hPDOSMemHandle);
err2:
    if (!psDeviceNode->psLocalDevMemArena)
        OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
                SGX_MMU_PAGE_SIZE, pvPDCpuVAddr, hPDOSMemHandle);
    else
        RA_Free(psDeviceNode->psLocalDevMemArena, sSysPAddr.uiAddr,
            IMG_FALSE);
err1:
    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
          psMMUContext, NULL);
    return PVRSRV_ERROR_GENERIC;
}
static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
{
    u32 *pui32PDEntry;
    u32 i;
    u32 ui32PDIndex;
    struct SYS_DATA *psSysData;
    struct MMU_PT_INFO **ppsPTInfoList;

    if (SysAcquireData(&psSysData) != PVRSRV_OK) {
        PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTable: "
            "ERROR call to SysAcquireData failed");
        return;
    }

    /* PD index of the heap base; ui32PTIndex is relative to it. */
    ui32PDIndex =
        pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >>
                    (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

    ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

    {
        PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
               ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
    }

    PDUMPCOMMENT("Free page table (page count == %08X)",
             pMMUHeap->ui32PTPageCount);
    if (ppsPTInfoList[ui32PTIndex] &&
        ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
        PDUMPFREEPAGETABLE(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);

    switch (pMMUHeap->psDevArena->DevMemHeapType) {
    case DEVICE_MEMORY_HEAP_SHARED:
    case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
    {
        /* Shared heaps appear in every context, so the PD entry must be
         * cleared in each one. */
        struct MMU_CONTEXT *psMMUContext = (struct MMU_CONTEXT *)
            pMMUHeap->psMMUContext->psDevInfo->pvMMUContextList;

        while (psMMUContext) {
            pui32PDEntry = (u32 *)psMMUContext->pvPDCpuVAddr;
            pui32PDEntry += ui32PDIndex;
            pui32PDEntry[ui32PTIndex] = 0;

            PDUMPPAGETABLE((void *)&pui32PDEntry[ui32PTIndex],
                       sizeof(u32), IMG_FALSE,
                       PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);

            psMMUContext = psMMUContext->psNext;
        }
        break;
    }
    case DEVICE_MEMORY_HEAP_PERCONTEXT:
    case DEVICE_MEMORY_HEAP_KERNEL:
    {
        /* Per-context heaps only live in the owning context. */
        pui32PDEntry = (u32 *)pMMUHeap->psMMUContext->pvPDCpuVAddr;
        pui32PDEntry += ui32PDIndex;
        pui32PDEntry[ui32PTIndex] = 0;

        PDUMPPAGETABLE((void *)&pui32PDEntry[ui32PTIndex],
                   sizeof(u32), IMG_FALSE,
                   PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
        break;
    }
    default:
    {
        PVR_DPF(PVR_DBG_ERROR,
            "_DeferredFreePageTable: ERROR invalid heap type");
        return;
    }
    }

    if (ppsPTInfoList[ui32PTIndex] != NULL) {
        if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
            u32 *pui32Tmp;

            pui32Tmp = (u32 *)
                ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr;

            /* Clear the valid PTEs, at most one page table's worth. */
            for (i = 0;
                 (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
                 i++)
                pui32Tmp[i] = 0;

            if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
                        psLocalDevMemArena == NULL) {
                OSFreePages(PVRSRV_HAP_WRITECOMBINE |
                        PVRSRV_HAP_KERNEL_ONLY,
                        SGX_MMU_PAGE_SIZE,
                        ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr,
                        ppsPTInfoList[ui32PTIndex]->
                                hPTPageOSMemHandle);
            } else {
                struct IMG_SYS_PHYADDR sSysPAddr;
                struct IMG_CPU_PHYADDR sCpuPAddr;

                sCpuPAddr = OSMapLinToCPUPhys(
                    ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);
                sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

                OSUnMapPhysToLin((void __force __iomem *)
                        ppsPTInfoList[ui32PTIndex]->
                                PTPageCpuVAddr,
                         SGX_MMU_PAGE_SIZE,
                         PVRSRV_HAP_WRITECOMBINE |
                            PVRSRV_HAP_KERNEL_ONLY,
                         ppsPTInfoList[ui32PTIndex]->
                                hPTPageOSMemHandle);

                RA_Free(pMMUHeap->psDevArena->
                        psDeviceMemoryHeapInfo->
                            psLocalDevMemArena,
                    sSysPAddr.uiAddr, IMG_FALSE);
            }

            pMMUHeap->ui32PTEntryCount -= i;
        } else {
            pMMUHeap->ui32PTEntryCount -= 1024;
        }

        OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
              sizeof(struct MMU_PT_INFO),
              ppsPTInfoList[ui32PTIndex], NULL);
        ppsPTInfoList[ui32PTIndex] = NULL;
    } else {
        pMMUHeap->ui32PTEntryCount -= 1024;
    }

    PDUMPCOMMENT("Finished free page table (page count == %08X)",
             pMMUHeap->ui32PTPageCount);
}
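/*
 * Illustrative sketch (not driver code): the index arithmetic used by
 * _DeferredFreePageTable above. With 4 KiB pages (12-bit page shift) and
 * 1024-entry page tables (10-bit PT shift), a device virtual address
 * splits into a page-directory index, a page-table index, and a byte
 * offset. The shift values mirror the roles of SGX_MMU_PAGE_SHIFT and
 * SGX_MMU_PT_SHIFT; treat the concrete numbers here as assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12  /* 4 KiB data pages */
#define PT_SHIFT    10  /* 1024 PTEs per page table */
#define PT_MASK     ((1u << PT_SHIFT) - 1)
#define OFFSET_MASK ((1u << PAGE_SHIFT) - 1)

int main(void)
{
    uint32_t vaddr = 0x12345678;
    uint32_t pd_index = vaddr >> (PAGE_SHIFT + PT_SHIFT);
    uint32_t pt_index = (vaddr >> PAGE_SHIFT) & PT_MASK;
    uint32_t offset = vaddr & OFFSET_MASK;

    /* 0x12345678 -> PD 0x48, PT 0x345, offset 0x678 */
    printf("pd=%#x pt=%#x off=%#x\n", pd_index, pt_index, offset);
    return 0;
}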
static void FreeBuf(struct BM_BUF *pBuf, u32 ui32Flags)
{
    struct BM_MAPPING *pMapping;

    PVR_DPF(PVR_DBG_MESSAGE,
        "FreeBuf: pBuf=%08X: DevVAddr=%08X CpuVAddr=%08X CpuPAddr=%08X",
        pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
        pBuf->CpuPAddr.uiAddr);

    pMapping = pBuf->pMapping;

    if (ui32Flags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
        if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
            PVR_DPF(PVR_DBG_ERROR, "FreeBuf: "
                "combination of DevVAddr management "
                "and RAM backing mode unsupported");
        else
            OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
                  sizeof(struct BM_MAPPING), pMapping, NULL);
    } else {
        if (pBuf->hOSMemHandle != pMapping->hOSMemHandle)
            OSReleaseSubMemHandle(pBuf->hOSMemHandle, ui32Flags);
        if (ui32Flags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
            RA_Free(pBuf->pMapping->pArena, pBuf->DevVAddr.uiAddr,
                IMG_FALSE);
        } else {
            switch (pMapping->eCpuMemoryOrigin) {
            case hm_wrapped:
                OSUnReservePhys(pMapping->CpuVAddr,
                        pMapping->uSize, ui32Flags,
                        pMapping->hOSMemHandle);
                break;
            case hm_wrapped_virtaddr:
                OSUnRegisterMem(pMapping->CpuVAddr,
                        pMapping->uSize, ui32Flags,
                        pMapping->hOSMemHandle);
                break;
            case hm_wrapped_scatter:
                OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
                             pMapping->uSize,
                             ui32Flags,
                             pMapping->hOSMemHandle);
                break;
            case hm_wrapped_scatter_virtaddr:
                OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
                             pMapping->uSize,
                             ui32Flags,
                             pMapping->hOSMemHandle);
                break;
            default:
                break;
            }

            DevMemoryFree(pMapping);

            OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
                  sizeof(struct BM_MAPPING), pMapping, NULL);
        }
    }

    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_BUF), pBuf, NULL);
}
static IMG_BOOL BM_ImportMemory(void *pH, size_t uRequestSize,
                size_t *pActualSize,
                struct BM_MAPPING **ppsMapping, u32 uFlags,
                u32 *pBase)
{
    struct BM_MAPPING *pMapping;
    struct BM_HEAP *pBMHeap = pH;
    struct BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
    IMG_BOOL bResult;
    size_t uSize;
    size_t uPSize;
    u32 uDevVAddrAlignment = 0;

    PVR_DPF(PVR_DBG_MESSAGE,
        "BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, "
        "uFlags=0x%x, uAlign=0x%x)",
        pBMContext, uRequestSize, uFlags, uDevVAddrAlignment);

    PVR_ASSERT(ppsMapping != NULL);
    PVR_ASSERT(pBMContext != NULL);

    if (ppsMapping == NULL) {
        PVR_DPF(PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter");
        goto fail_exit;
    }

    uSize = HOST_PAGEALIGN(uRequestSize);
    PVR_ASSERT(uSize >= uRequestSize);

    if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
               (void **)&pMapping, NULL) != PVRSRV_OK) {
        PVR_DPF(PVR_DBG_ERROR,
            "BM_ImportMemory: failed struct BM_MAPPING alloc");
        goto fail_exit;
    }

    pMapping->hOSMemHandle = NULL;
    pMapping->CpuVAddr = NULL;
    pMapping->DevVAddr.uiAddr = 0;
    pMapping->CpuPAddr.uiAddr = 0;
    pMapping->uSize = uSize;
    pMapping->pBMHeap = pBMHeap;
    pMapping->ui32Flags = uFlags;

    if (pActualSize)
        *pActualSize = uSize;

    if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
        uPSize = pBMHeap->sDevArena.ui32DataPageSize;
    else
        uPSize = pMapping->uSize;

    if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
        if (OSAllocPages(pBMHeap->ui32Attribs, uPSize,
                 pBMHeap->sDevArena.ui32DataPageSize,
                 (void **)&pMapping->CpuVAddr,
                 &pMapping->hOSMemHandle) != PVRSRV_OK) {
            PVR_DPF(PVR_DBG_ERROR,
                "BM_ImportMemory: OSAllocPages(0x%x) failed",
                uPSize);
            goto fail_mapping_alloc;
        }

        pMapping->eCpuMemoryOrigin = hm_env;
    } else if (pBMHeap->ui32Attribs &
           PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
        struct IMG_SYS_PHYADDR sSysPAddr;

        PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);

        if (!RA_Alloc(pBMHeap->pLocalDevMemArena, uPSize, NULL, 0,
                  pBMHeap->sDevArena.ui32DataPageSize,
                  (u32 *)&sSysPAddr.uiAddr)) {
            PVR_DPF(PVR_DBG_ERROR,
                "BM_ImportMemory: RA_Alloc(0x%x) FAILED",
                uPSize);
            goto fail_mapping_alloc;
        }

        pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
        if (OSReservePhys(pMapping->CpuPAddr, uPSize,
                  pBMHeap->ui32Attribs, &pMapping->CpuVAddr,
                  &pMapping->hOSMemHandle) != PVRSRV_OK) {
            PVR_DPF(PVR_DBG_ERROR,
                "BM_ImportMemory: OSReservePhys failed");
            goto fail_dev_mem_alloc;
        }

        pMapping->eCpuMemoryOrigin = hm_contiguous;
    } else {
        PVR_DPF(PVR_DBG_ERROR,
            "BM_ImportMemory: Invalid backing store type");
        goto fail_mapping_alloc;
    }

    bResult = DevMemoryAlloc(pBMContext, pMapping, uFlags,
                 uDevVAddrAlignment, &pMapping->DevVAddr);
    if (!bResult) {
        PVR_DPF(PVR_DBG_ERROR,
            "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
            pMapping->uSize);
        goto fail_dev_mem_alloc;
    }

    PVR_ASSERT(uDevVAddrAlignment > 1 ?
           (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);

    *pBase = pMapping->DevVAddr.uiAddr;
    *ppsMapping = pMapping;

    PVR_DPF(PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE");
    return IMG_TRUE;

fail_dev_mem_alloc:
    if (pMapping->CpuVAddr || pMapping->hOSMemHandle) {
        if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
            pMapping->uSize /= 2;

        if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
            uPSize = pBMHeap->sDevArena.ui32DataPageSize;
        else
            uPSize = pMapping->uSize;

        if (pBMHeap->ui32Attribs &
            PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
            OSFreePages(pBMHeap->ui32Attribs, uPSize,
                    (void *)pMapping->CpuVAddr,
                    pMapping->hOSMemHandle);
        } else {
            struct IMG_SYS_PHYADDR sSysPAddr;

            if (pMapping->CpuVAddr)
                OSUnReservePhys(pMapping->CpuVAddr, uPSize,
                        pBMHeap->ui32Attribs,
                        pMapping->hOSMemHandle);
            sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
            RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
                IMG_FALSE);
        }
    }
fail_mapping_alloc:
    OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
          pMapping, NULL);
fail_exit:
    return IMG_FALSE;
}