/*
 * BM_Free - drop one reference on a buffer-manager buffer and, on the last
 * reference, tear the buffer down.
 *
 * hBuf:      opaque handle, really a struct BM_BUF pointer.
 * ui32Flags: allocation flags, forwarded to FreeBuf().
 *
 * On the final release the buffer is removed from the context's wrapped-
 * buffer hash (only wrapped origins were inserted there), FreeBuf()
 * releases its storage, and the context reference is dropped via
 * pvr_put_ctx().
 */
void BM_Free(void *hBuf, u32 ui32Flags)
{
	struct BM_BUF *pBuf = (struct BM_BUF *)hBuf;
	struct SYS_DATA *psSysData;
	struct IMG_SYS_PHYADDR sHashAddr;

	PVR_DPF(PVR_DBG_MESSAGE, "BM_Free (h=%08X)", hBuf);

	PVR_ASSERT(pBuf != NULL);

	if (pBuf == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "BM_Free: invalid parameter");
		return;
	}

	/* psSysData is not used below; the call only verifies that services
	 * system data is initialised before anything is freed. */
	if (SysAcquireData(&psSysData) != PVRSRV_OK)
		return;

	pBuf->ui32RefCount--;
	if (pBuf->ui32RefCount == 0) {
		/* Cache map/ctx now: FreeBuf() releases pBuf, and with it the
		 * pMapping chain used to reach the context. */
		struct BM_MAPPING *map = pBuf->pMapping;
		struct BM_CONTEXT *ctx = map->pBMHeap->pBMContext;

		/* Only wrapped buffers were entered in the per-context hash,
		 * keyed by system physical address. */
		if (map->eCpuMemoryOrigin == hm_wrapped ||
		    map->eCpuMemoryOrigin == hm_wrapped_virtaddr) {
			sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);
			HASH_Remove(ctx->pBufferHash, (u32)sHashAddr.uiAddr);
		}
		FreeBuf(pBuf, ui32Flags);
		pvr_put_ctx(ctx);
	}
}
/*
 * SysLocateDevices - fill out gsSGXDeviceMap (register bank, size, IRQ)
 * for the OMAP3430 platform.
 *
 * psSysData: unused (kept for the common prototype).
 * Returns PVRSRV_OK, or the error from the no-hardware register-emulation
 * allocation.
 */
static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
{
#if defined(NO_HARDWARE)
	PVRSRV_ERROR eError;
	IMG_CPU_PHYADDR sCpuPAddr;
#endif

	PVR_UNREFERENCED_PARAMETER(psSysData);

	gsSGXDeviceMap.ui32Flags = 0x0;

#if defined(NO_HARDWARE)
	/* No real device: emulate the register bank with zeroed contiguous
	 * memory so register reads see 0. */
	eError = OSBaseAllocContigMemory(SYS_OMAP3430_SGX_REGS_SIZE,
					 &gsSGXRegsCPUVAddr,
					 &sCpuPAddr);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}
	gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
	gsSGXDeviceMap.sRegsSysPBase =
		SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
	gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
#if defined(__linux__)
	/* Registers are already CPU-mapped by the allocation above. */
	gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
#else
	gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
#endif
	OSMemSet(gsSGXRegsCPUVAddr, 0, SYS_OMAP3430_SGX_REGS_SIZE);
	/* No interrupts without hardware. */
	gsSGXDeviceMap.ui32IRQ = 0;
#else
	/* Real hardware: fixed physical register base and IRQ line. */
	gsSGXDeviceMap.sRegsSysPBase.uiAddr =
		SYS_OMAP3430_SGX_REGS_SYS_PHYS_BASE;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
	gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3430_SGX_REGS_SIZE;
	gsSGXDeviceMap.ui32IRQ = SYS_OMAP3430_SGX_IRQ;
#endif

#if defined(PDUMP)
	{
		/* Memory-region name used when pdumping. */
		static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
		gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
	}
#endif

	return PVRSRV_OK;
}
/*
 * BM_Free - drop one reference on a buffer-manager buffer and, on the last
 * reference, tear the buffer down (IMG-typedef variant).
 *
 * hBuf:      buffer handle, really a BM_BUF pointer.
 * ui32Flags: allocation flags, forwarded to FreeBuf().
 */
IMG_VOID BM_Free (BM_HANDLE hBuf, IMG_UINT32 ui32Flags)
{
	BM_BUF *pBuf = (BM_BUF *)hBuf;
	SYS_DATA *psSysData;
	IMG_SYS_PHYADDR sHashAddr;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_Free (h=0x%x)", (IMG_UINTPTR_T)hBuf));

	PVR_ASSERT (pBuf!=IMG_NULL);

	if (pBuf == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Free: invalid parameter"));
		return;
	}

	/* NOTE(review): psSysData is never used and the return code is
	 * ignored here — presumably only establishing that services data
	 * exists; confirm against other BM_* entry points. */
	SysAcquireData(&psSysData);

	pBuf->ui32RefCount--;

	if(pBuf->ui32RefCount == 0)
	{
		/* Only wrapped buffers were entered in the per-context hash,
		 * keyed by system physical address. */
		if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped
		   || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
		{
			sHashAddr = SysCpuPAddrToSysPAddr(pBuf->CpuPAddr);

			HASH_Remove (pBuf->pMapping->pBMHeap->pBMContext->pBufferHash,
				     (IMG_UINTPTR_T)sHashAddr.uiAddr);
		}
		FreeBuf (pBuf, ui32Flags, IMG_TRUE);
	}
}
/*
 * BM_FreeMemory - RA import-arena "free" callback: release the device
 * mapping and the CPU-side backing described by psMapping, then free the
 * BM_MAPPING record itself.
 *
 * h         : the owning BM_HEAP (callback context pointer).
 * _base     : device virtual base of the import (unused; logged only).
 * psMapping : mapping to tear down; freed before return.
 *
 * Fix: the original emitted its trailing debug message *after*
 * OSFreeMem(psMapping), reading the pointer's (indeterminate) value after
 * free. The message is now logged before the record is released.
 */
static void BM_FreeMemory(void *h, u32 _base, struct BM_MAPPING *psMapping)
{
	struct BM_HEAP *pBMHeap = h;
	size_t uPSize;

	PVR_UNREFERENCED_PARAMETER(_base);

	PVR_DPF(PVR_DBG_MESSAGE,
		"BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h, _base,
		psMapping);

	PVR_ASSERT(psMapping != NULL);

	if (psMapping == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter");
		return;
	}

	/* Unmap from the device MMU first, while the mapping is intact. */
	DevMemoryFree(psMapping);

	/* DevMemoryAlloc doubled uSize for interleaved allocations; undo
	 * that before sizing the CPU-side free. */
	if ((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
		psMapping->uSize /= 2;

	/* Dummy allocations are backed by a single data page. */
	if (psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
	else
		uPSize = psMapping->uSize;

	if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
		/* OS-page backed: return the pages to the OS. */
		OSFreePages(pBMHeap->ui32Attribs, uPSize,
			    (void *)psMapping->CpuVAddr,
			    psMapping->hOSMemHandle);
	} else if (pBMHeap->ui32Attribs &
		   PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
		/* Local device memory: unmap the CPU view, then return the
		 * physical span to the local-memory arena. */
		struct IMG_SYS_PHYADDR sSysPAddr;

		OSUnReservePhys(psMapping->CpuVAddr, uPSize,
				pBMHeap->ui32Attribs,
				psMapping->hOSMemHandle);
		sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);
		RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
			IMG_FALSE);
	} else {
		PVR_DPF(PVR_DBG_ERROR,
			"BM_FreeMemory: Invalid backing store type");
	}

	/* Log while psMapping is still valid — never touch it after the
	 * OSFreeMem() below. */
	PVR_DPF(PVR_DBG_MESSAGE,
		"..BM_FreeMemory (h=%08X, base=0x%x, psMapping=0x%x)", h,
		_base, psMapping);

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
		  psMapping, NULL);
}
/*
 * SysLocateDevices - describe the SGX540 device for this platform: fixed
 * register base, register-bank size and IRQ; no host port and no local
 * device memory.
 *
 * psSysData: unused (kept for the common prototype).
 * Returns PVRSRV_OK.
 *
 * Fixes: ui32Flags was only initialised inside a dead "#if 0" block, so
 * the live path left it unset (every sibling platform clears it); the IRQ
 * was assigned twice; dead code, commented-out declarations and a stray
 * ";;" removed.
 */
static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
{
	PVR_UNREFERENCED_PARAMETER(psSysData);

	gsSGXDeviceMap.ui32Flags = 0x0;

	/* Register bank lives at a fixed system physical address. */
	gsSGXDeviceMap.sRegsSysPBase.uiAddr = SGX540_BASEADDR;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
	gsSGXDeviceMap.ui32RegsSize = SGX_REG_SIZE;
	gsSGXDeviceMap.ui32IRQ = SGX540_IRQ;

#if defined(SGX_FEATURE_HOST_PORT)
	/* No host port on this platform. */
	gsSGXDeviceMap.sHPSysPBase.uiAddr = 0;
	gsSGXDeviceMap.sHPCpuPBase.uiAddr = 0;
	gsSGXDeviceMap.ui32HPSize = 0;
#endif

	/* No local (on-chip/card) device memory. */
	gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
	gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
	gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
	gsSGXDeviceMap.ui32LocalMemSize = 0;

	return PVRSRV_OK;
}
/*
 * BM_HandleToSysPaddr - translate a buffer handle into the system physical
 * address of its CPU-side backing. Returns a zeroed address for a NULL
 * handle.
 */
IMG_SYS_PHYADDR BM_HandleToSysPaddr(BM_HANDLE hBuf)
{
	BM_BUF *psBuf = (BM_BUF *)hBuf;

	PVR_ASSERT(psBuf != IMG_NULL);

	if (psBuf == IMG_NULL)
	{
		IMG_SYS_PHYADDR sNullAddr = {0};

		PVR_DPF((PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"));
		return sNullAddr;
	}

	PVR_DPF((PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=0x%x)=%08X",
		 (IMG_UINTPTR_T)hBuf, psBuf->CpuPAddr.uiAddr));

	return SysCpuPAddrToSysPAddr(psBuf->CpuPAddr);
}
struct IMG_SYS_PHYADDR BM_HandleToSysPaddr(void *hBuf) { struct BM_BUF *pBuf = (struct BM_BUF *)hBuf; PVR_ASSERT(pBuf != NULL); if (pBuf == NULL) { struct IMG_SYS_PHYADDR PhysAddr = { 0 }; PVR_DPF(PVR_DBG_ERROR, "BM_HandleToSysPaddr: invalid parameter"); return PhysAddr; } PVR_DPF(PVR_DBG_MESSAGE, "BM_HandleToSysPaddr(h=%08X)=%08X", hBuf, pBuf->CpuPAddr.uiAddr); return SysCpuPAddrToSysPAddr(pBuf->CpuPAddr); }
/*
 * BM_FreeMemory - RA import-arena "free" callback (IMG-typedef variant):
 * release the device mapping and CPU-side backing described by psMapping,
 * then free the BM_MAPPING record itself.
 *
 * h         : the owning BM_HEAP (callback context pointer).
 * _base     : device virtual base of the import (unused; logged only).
 * psMapping : mapping to tear down; freed before return.
 */
static IMG_VOID BM_FreeMemory (IMG_VOID *h, IMG_UINTPTR_T _base,
			       BM_MAPPING *psMapping)
{
	BM_HEAP *pBMHeap = h;
	IMG_SIZE_T uPSize;

	PVR_UNREFERENCED_PARAMETER (_base);

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_FreeMemory (h=0x%x, base=0x%x, psMapping=0x%x)",
		  (IMG_UINTPTR_T)h, _base, (IMG_UINTPTR_T)psMapping));

	PVR_ASSERT (psMapping != IMG_NULL);

	if (psMapping == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_FreeMemory: invalid parameter"));
		return;
	}

	/* Unmap from the device MMU first, while the mapping is intact. */
	DevMemoryFree (psMapping);

	/* DevMemoryAlloc doubled uSize for interleaved allocations; undo
	 * that before sizing the CPU-side free. */
	if((psMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED) != 0)
	{
		psMapping->uSize /= 2;
	}

	/* Dummy allocations are backed by a single data page. */
	if(psMapping->ui32Flags & PVRSRV_MEM_DUMMY)
	{
		uPSize = psMapping->pBMHeap->sDevArena.ui32DataPageSize;
	}
	else
	{
		uPSize = psMapping->uSize;
	}

	if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
	{
		/* OS-page backed: return the pages to the OS. */
		OSFreePages(pBMHeap->ui32Attribs,
			    uPSize,
			    (IMG_VOID *) psMapping->CpuVAddr,
			    psMapping->hOSMemHandle);
	}
	else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
	{
		/* Local device memory: unmap the CPU view, then return the
		 * physical span to the local-memory arena. */
		IMG_SYS_PHYADDR sSysPAddr;

		OSUnReservePhys(psMapping->CpuVAddr, uPSize,
				pBMHeap->ui32Attribs,
				psMapping->hOSMemHandle);

		sSysPAddr = SysCpuPAddrToSysPAddr(psMapping->CpuPAddr);

		RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
			 IMG_FALSE);
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "BM_FreeMemory: Invalid backing store type"));
	}

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), psMapping,
		  IMG_NULL);

	/* Only h/_base are logged here; psMapping is gone by now. */
	PVR_DPF ((PVR_DBG_MESSAGE,
		  "..BM_FreeMemory (h=0x%x, base=0x%x)",
		  (IMG_UINTPTR_T)h, _base));
}
/*
 * BM_ImportMemory - RA import callback for a device-memory heap: acquire
 * physical backing for uRequestSize bytes, map it into the device MMU and
 * hand the device virtual base back to the resource allocator.
 *
 * pH          : the BM_HEAP being imported into.
 * uRequestSize: requested size in bytes (page-aligned upwards here).
 * pActualSize : optional out - actual imported size.
 * ppsMapping  : out - new BM_MAPPING describing the import.
 * uFlags      : PVRSRV_* allocation flags.
 * pBase       : out - device virtual base address.
 *
 * Returns IMG_TRUE on success; on failure everything acquired so far is
 * unwound through the goto chain and IMG_FALSE is returned.
 */
static IMG_BOOL
BM_ImportMemory (IMG_VOID *pH, IMG_SIZE_T uRequestSize,
		 IMG_SIZE_T *pActualSize, BM_MAPPING **ppsMapping,
		 IMG_UINT32 uFlags, IMG_UINTPTR_T *pBase)
{
	BM_MAPPING *pMapping;
	BM_HEAP *pBMHeap = pH;
	BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
	IMG_BOOL bResult;
	IMG_SIZE_T uSize;
	IMG_SIZE_T uPSize;
	/* No extra device-virtual alignment is requested for imports. */
	IMG_UINT32 uDevVAddrAlignment = 0;

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
		  (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));

	PVR_ASSERT (ppsMapping != IMG_NULL);
	PVR_ASSERT (pBMContext != IMG_NULL);

	if (ppsMapping == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
		goto fail_exit;
	}

	uSize = HOST_PAGEALIGN (uRequestSize);
	PVR_ASSERT (uSize >= uRequestSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
			sizeof (BM_MAPPING),
			(IMG_PVOID *)&pMapping, IMG_NULL,
			"Buffer Manager Mapping") != PVRSRV_OK)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
		goto fail_exit;
	}

	pMapping->hOSMemHandle = 0;
	pMapping->CpuVAddr = 0;
	pMapping->DevVAddr.uiAddr = 0;
	pMapping->CpuPAddr.uiAddr = 0;
	pMapping->uSize = uSize;
	pMapping->pBMHeap = pBMHeap;
	pMapping->ui32Flags = uFlags;

	if (pActualSize)
	{
		*pActualSize = uSize;
	}

	/* Dummy allocations are backed by a single data page. */
	if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
	{
		uPSize = pBMHeap->sDevArena.ui32DataPageSize;
	}
	else
	{
		uPSize = pMapping->uSize;
	}

	if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
	{
		IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

		/* Caller-specified cache type overrides the heap default. */
		if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
		{
			ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
			ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
		}

		/* OS-page backing. */
		if (OSAllocPages(ui32Attribs,
				 uPSize,
				 pBMHeap->sDevArena.ui32DataPageSize,
				 (IMG_VOID **)&pMapping->CpuVAddr,
				 &pMapping->hOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
				 "BM_ImportMemory: OSAllocPages(0x%x) failed",
				 uPSize));
			goto fail_mapping_alloc;
		}

		pMapping->eCpuMemoryOrigin = hm_env;
	}
	else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
	{
		IMG_SYS_PHYADDR sSysPAddr;
		IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

		PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);

		/* Caller-specified cache type overrides the heap default. */
		if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
		{
			ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
			ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
		}

		/* Carve the physical span out of the local-memory arena... */
		if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
			       uPSize, IMG_NULL, IMG_NULL, 0,
			       pBMHeap->sDevArena.ui32DataPageSize, 0,
			       (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
			goto fail_mapping_alloc;
		}

		/* ...then give it a CPU mapping. */
		pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		if(OSReservePhys(pMapping->CpuPAddr,
				 uPSize,
				 ui32Attribs,
				 &pMapping->CpuVAddr,
				 &pMapping->hOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: OSReservePhys failed"));
			goto fail_dev_mem_alloc;
		}

		pMapping->eCpuMemoryOrigin = hm_contiguous;
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: Invalid backing store type"));
		goto fail_mapping_alloc;
	}

	/* Map the new backing into the device MMU. */
	bResult = DevMemoryAlloc (pBMContext, pMapping, IMG_NULL, uFlags,
				  uDevVAddrAlignment, &pMapping->DevVAddr);
	if (!bResult)
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
			 pMapping->uSize));
		goto fail_dev_mem_alloc;
	}

	PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);

	*pBase = pMapping->DevVAddr.uiAddr;
	*ppsMapping = pMapping;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
	return IMG_TRUE;

fail_dev_mem_alloc:
	/* Undo the CPU-side backing acquired above. */
	if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
	{
		/* Mirror the size fixups performed by DevMemoryAlloc. */
		if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
		{
			pMapping->uSize /= 2;
		}

		if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		{
			uPSize = pBMHeap->sDevArena.ui32DataPageSize;
		}
		else
		{
			uPSize = pMapping->uSize;
		}

		if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
		{
			OSFreePages(pBMHeap->ui32Attribs,
				    uPSize,
				    (IMG_VOID *)pMapping->CpuVAddr,
				    pMapping->hOSMemHandle);
		}
		else
		{
			IMG_SYS_PHYADDR sSysPAddr;

			if(pMapping->CpuVAddr)
			{
				OSUnReservePhys(pMapping->CpuVAddr,
						uPSize,
						pBMHeap->ui32Attribs,
						pMapping->hOSMemHandle);
			}
			sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
			RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
		}
	}
fail_mapping_alloc:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
fail_exit:
	return IMG_FALSE;
}
/*
 * DevMemoryAlloc - reserve device virtual address space for pMapping and
 * wire its CPU-side backing into the device MMU, dispatching on where the
 * backing came from (contiguous physical, OS pages, or scatter list).
 *
 * pBMContext         : buffer-manager context (supplies the device node).
 * pMapping           : mapping whose backing is to be mapped; DevVAddr is
 *                      filled in on success.
 * pActualSize        : optional out - actual size reserved by the MMU.
 * uFlags             : PVRSRV_* allocation flags.
 * dev_vaddr_alignment: required device-virtual alignment.
 * pDevVAddr          : out - device virtual address of the mapping.
 *
 * Returns IMG_TRUE on success, IMG_FALSE on MMU allocation failure or an
 * unknown memory origin.
 */
static IMG_BOOL
DevMemoryAlloc (BM_CONTEXT *pBMContext, BM_MAPPING *pMapping,
		IMG_SIZE_T *pActualSize, IMG_UINT32 uFlags,
		IMG_UINT32 dev_vaddr_alignment, IMG_DEV_VIRTADDR *pDevVAddr)
{
	PVRSRV_DEVICE_NODE *psDeviceNode;
#ifdef PDUMP
	IMG_UINT32 ui32PDumpSize = pMapping->uSize;
#endif

	psDeviceNode = pBMContext->psDeviceNode;

	/* Interleaved allocations occupy twice the device virtual range. */
	if(uFlags & PVRSRV_MEM_INTERLEAVED)
	{
		pMapping->uSize *= 2;
	}

#ifdef PDUMP
	if(uFlags & PVRSRV_MEM_DUMMY)
	{
		/* Dummy allocations are backed by a single data page. */
		ui32PDumpSize = pMapping->pBMHeap->sDevArena.ui32DataPageSize;
	}
#endif

	/* Reserve device virtual address space. */
	if (!psDeviceNode->pfnMMUAlloc (pMapping->pBMHeap->pMMUHeap,
					pMapping->uSize,
					pActualSize,
					0,
					dev_vaddr_alignment,
					&(pMapping->DevVAddr)))
	{
		PVR_DPF((PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc"));
		return IMG_FALSE;
	}

#ifdef SUPPORT_SGX_MMU_BYPASS
	EnableHostAccess(pBMContext->psMMUContext);
#endif

#if defined(PDUMP)
	PDUMPMALLOCPAGES(&psDeviceNode->sDevId,
			 pMapping->DevVAddr.uiAddr,
			 pMapping->CpuVAddr,
			 pMapping->hOSMemHandle,
			 ui32PDumpSize,
			 pMapping->pBMHeap->sDevArena.ui32DataPageSize,
#if defined(SUPPORT_PDUMP_MULTI_PROCESS)
			 psDeviceNode->pfnMMUIsHeapShared(pMapping->pBMHeap->pMMUHeap),
#else
			 IMG_FALSE,
#endif
			 (IMG_HANDLE)pMapping);
#endif

	/* Map according to where the CPU backing originated. */
	switch (pMapping->eCpuMemoryOrigin)
	{
		case hm_wrapped:
		case hm_wrapped_virtaddr:
		case hm_contiguous:
		{
			/* Physically contiguous: map from the physical base. */
			psDeviceNode->pfnMMUMapPages (	pMapping->pBMHeap->pMMUHeap,
							pMapping->DevVAddr,
							SysCpuPAddrToSysPAddr (pMapping->CpuPAddr),
							pMapping->uSize,
							uFlags,
							(IMG_HANDLE)pMapping);

			*pDevVAddr = pMapping->DevVAddr;
			break;
		}
		case hm_env:
		{
			/* OS pages: shadow-map via the OS memory handle. */
			psDeviceNode->pfnMMUMapShadow (	pMapping->pBMHeap->pMMUHeap,
							pMapping->DevVAddr,
							pMapping->uSize,
							pMapping->CpuVAddr,
							pMapping->hOSMemHandle,
							pDevVAddr,
							uFlags,
							(IMG_HANDLE)pMapping);
			break;
		}
		case hm_wrapped_scatter:
		case hm_wrapped_scatter_virtaddr:
		{
			/* Scattered wrap: map from the page-address list. */
			psDeviceNode->pfnMMUMapScatter (pMapping->pBMHeap->pMMUHeap,
							pMapping->DevVAddr,
							pMapping->psSysAddr,
							pMapping->uSize,
							uFlags,
							(IMG_HANDLE)pMapping);

			*pDevVAddr = pMapping->DevVAddr;
			break;
		}
		default:
			PVR_DPF((PVR_DBG_ERROR,
				 "Illegal value %d for pMapping->eCpuMemoryOrigin",
				 pMapping->eCpuMemoryOrigin));
			/* NOTE(review): this path skips DisableHostAccess
			 * below when SUPPORT_SGX_MMU_BYPASS is defined —
			 * confirm whether that is intentional. */
			return IMG_FALSE;
	}

#ifdef SUPPORT_SGX_MMU_BYPASS
	DisableHostAccess(pBMContext->psMMUContext);
#endif

	return IMG_TRUE;
}
/*
 * MMU_Finalise - tear down an MMU context: wipe and free its page
 * directory, unlink it from the per-device context list, then free the
 * context structure itself.
 */
void MMU_Finalise(struct MMU_CONTEXT *psMMUContext)
{
	u32 *pui32Tmp, i;
	struct SYS_DATA *psSysData;
	struct MMU_CONTEXT **ppsMMUContext;

	if (SysAcquireData(&psSysData) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Finalise: ERROR call to SysAcquireData failed");
		return;
	}

	PDUMPCOMMENT("Free page directory");
	PDUMPFREEPAGETABLE(psMMUContext->pvPDCpuVAddr);

	/* Clear every PD entry before releasing the page so stale device
	 * translations cannot be picked up. */
	pui32Tmp = (u32 *) psMMUContext->pvPDCpuVAddr;
	for (i = 0; i < SGX_MMU_PD_SIZE; i++)
		pui32Tmp[i] = 0;

	if (psMMUContext->psDeviceNode->psLocalDevMemArena == NULL) {
		/* PD page was OS-page backed. */
		OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
			    SGX_MMU_PAGE_SIZE, psMMUContext->pvPDCpuVAddr,
			    psMMUContext->hPDOSMemHandle);
	} else {
		/* PD page came from the local device-memory arena: unmap
		 * the CPU view, then return the physical page. */
		struct IMG_SYS_PHYADDR sSysPAddr;
		struct IMG_CPU_PHYADDR sCpuPAddr;

		sCpuPAddr = OSMapLinToCPUPhys(psMMUContext->pvPDCpuVAddr);
		sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

		OSUnMapPhysToLin((void __iomem __force *)
					psMMUContext->pvPDCpuVAddr,
				 SGX_MMU_PAGE_SIZE,
				 PVRSRV_HAP_WRITECOMBINE |
					PVRSRV_HAP_KERNEL_ONLY,
				 psMMUContext->hPDOSMemHandle);

		RA_Free(psMMUContext->psDeviceNode->psLocalDevMemArena,
			sSysPAddr.uiAddr, IMG_FALSE);
	}

	PVR_DPF(PVR_DBG_MESSAGE, "MMU_Finalise");

	/* Unlink this context from the device's MMU-context list. */
	ppsMMUContext = (struct MMU_CONTEXT **)
			&psMMUContext->psDevInfo->pvMMUContextList;
	while (*ppsMMUContext) {
		if (*ppsMMUContext == psMMUContext) {
			*ppsMMUContext = psMMUContext->psNext;
			break;
		}
		ppsMMUContext = &((*ppsMMUContext)->psNext);
	}

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
		  psMMUContext, NULL);
}
/*
 * _DeferredFreePageTable - free the page table at index ui32PTIndex of
 * pMMUHeap (relative to the heap's base PD index): clear the matching PD
 * entry in every context that shares the heap, wipe the PT entries,
 * return the PT page to its allocator, and release the PT-info record.
 * The running ui32PTEntryCount is decremented by the entries accounted
 * for (full 1024 when no PT page/info exists).
 */
static void _DeferredFreePageTable(struct MMU_HEAP *pMMUHeap, u32 ui32PTIndex)
{
	u32 *pui32PDEntry;
	u32 i;
	u32 ui32PDIndex;
	struct SYS_DATA *psSysData;
	struct MMU_PT_INFO **ppsPTInfoList;

	if (SysAcquireData(&psSysData) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "_DeferredFreePageTables: "
			 "ERROR call to SysAcquireData failed");
		return;
	}

	/* PD index of the heap's first page table; ui32PTIndex is an
	 * offset from it. */
	ui32PDIndex =
	    pMMUHeap->psDevArena->BaseDevVAddr.uiAddr >>
			(SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

	{
		/* The PT must hold no valid entries before being freed. */
		PVR_ASSERT(ppsPTInfoList[ui32PTIndex] == NULL ||
			   ppsPTInfoList[ui32PTIndex]->ui32ValidPTECount == 0);
	}

	PDUMPCOMMENT("Free page table (page count == %08X)",
		     pMMUHeap->ui32PTPageCount);
	if (ppsPTInfoList[ui32PTIndex] &&
	    ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr)
		PDUMPFREEPAGETABLE(ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr);

	switch (pMMUHeap->psDevArena->DevMemHeapType) {
	case DEVICE_MEMORY_HEAP_SHARED:
	case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
		{
			/* Shared heap: invalidate the PD entry in every MMU
			 * context on the device. */
			struct MMU_CONTEXT *psMMUContext =
				(struct MMU_CONTEXT *)
				pMMUHeap->psMMUContext->psDevInfo->
							pvMMUContextList;

			while (psMMUContext) {
				pui32PDEntry =
				    (u32 *) psMMUContext->pvPDCpuVAddr;
				pui32PDEntry += ui32PDIndex;
				pui32PDEntry[ui32PTIndex] = 0;

				PDUMPPAGETABLE((void *) &pui32PDEntry
						       [ui32PTIndex],
					       sizeof(u32), IMG_FALSE,
					       PDUMP_PT_UNIQUETAG,
					       PDUMP_PT_UNIQUETAG);

				psMMUContext = psMMUContext->psNext;
			}
			break;
		}
	case DEVICE_MEMORY_HEAP_PERCONTEXT:
	case DEVICE_MEMORY_HEAP_KERNEL:
		{
			/* Per-context/kernel heap: only this context's PD. */
			pui32PDEntry =
			    (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
			pui32PDEntry += ui32PDIndex;
			pui32PDEntry[ui32PTIndex] = 0;

			PDUMPPAGETABLE((void *) &pui32PDEntry[ui32PTIndex],
				       sizeof(u32), IMG_FALSE,
				       PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);
			break;
		}
	default:
		{
			PVR_DPF(PVR_DBG_ERROR,
			     "_DeferredFreePagetable: ERROR invalid heap type");
			return;
		}
	}

	if (ppsPTInfoList[ui32PTIndex] != NULL) {
		if (ppsPTInfoList[ui32PTIndex]->PTPageCpuVAddr != NULL) {
			u32 *pui32Tmp;

			/* Wipe the PTEs we account for; a PT holds at most
			 * 1024 entries. */
			pui32Tmp = (u32 *) ppsPTInfoList[ui32PTIndex]->
								PTPageCpuVAddr;

			for (i = 0;
			     (i < pMMUHeap->ui32PTEntryCount) && (i < 1024);
			     i++)
				pui32Tmp[i] = 0;

			if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
					psLocalDevMemArena == NULL) {
				/* PT page was OS-page backed. */
				OSFreePages(PVRSRV_HAP_WRITECOMBINE |
						PVRSRV_HAP_KERNEL_ONLY,
					    SGX_MMU_PAGE_SIZE,
					    ppsPTInfoList[ui32PTIndex]->
							PTPageCpuVAddr,
					    ppsPTInfoList[ui32PTIndex]->
							hPTPageOSMemHandle);
			} else {
				/* PT page came from the local device-memory
				 * arena: unmap the CPU view, then return the
				 * physical page. */
				struct IMG_SYS_PHYADDR sSysPAddr;
				struct IMG_CPU_PHYADDR sCpuPAddr;

				sCpuPAddr =
				    OSMapLinToCPUPhys(ppsPTInfoList
							      [ui32PTIndex]->
							      PTPageCpuVAddr);
				sSysPAddr = SysCpuPAddrToSysPAddr(sCpuPAddr);

				OSUnMapPhysToLin((void __force __iomem *)
							 ppsPTInfoList
							 [ui32PTIndex]->
							 PTPageCpuVAddr,
						 SGX_MMU_PAGE_SIZE,
						 PVRSRV_HAP_WRITECOMBINE |
							 PVRSRV_HAP_KERNEL_ONLY,
						 ppsPTInfoList[ui32PTIndex]->
							 hPTPageOSMemHandle);

				RA_Free(pMMUHeap->psDevArena->
						psDeviceMemoryHeapInfo->
						psLocalDevMemArena,
					sSysPAddr.uiAddr, IMG_FALSE);
			}

			pMMUHeap->ui32PTEntryCount -= i;
		} else {
			/* Info record exists but no PT page was attached. */
			pMMUHeap->ui32PTEntryCount -= 1024;
		}

		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  sizeof(struct MMU_PT_INFO),
			  ppsPTInfoList[ui32PTIndex], NULL);
		ppsPTInfoList[ui32PTIndex] = NULL;
	} else {
		pMMUHeap->ui32PTEntryCount -= 1024;
	}

	PDUMPCOMMENT("Finished free page table (page count == %08X)",
		     pMMUHeap->ui32PTPageCount);
}
/*
 * BM_ImportMemory - RA import callback (kernel-style variant): acquire
 * physical backing for uRequestSize bytes, map it into the device MMU and
 * hand the device virtual base back to the resource allocator.
 *
 * pH          : the BM_HEAP being imported into.
 * uRequestSize: requested size in bytes (page-aligned upwards here).
 * pActualSize : optional out - actual imported size.
 * ppsMapping  : out - new BM_MAPPING describing the import.
 * uFlags      : PVRSRV_* allocation flags.
 * pBase       : out - device virtual base address.
 *
 * Returns IMG_TRUE on success; on failure everything acquired so far is
 * unwound through the goto chain and IMG_FALSE is returned.
 */
static IMG_BOOL BM_ImportMemory(void *pH, size_t uRequestSize,
				size_t *pActualSize,
				struct BM_MAPPING **ppsMapping, u32 uFlags,
				u32 *pBase)
{
	struct BM_MAPPING *pMapping;
	struct BM_HEAP *pBMHeap = pH;
	struct BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
	IMG_BOOL bResult;
	size_t uSize;
	size_t uPSize;
	/* No extra device-virtual alignment is requested for imports. */
	u32 uDevVAddrAlignment = 0;

	PVR_DPF(PVR_DBG_MESSAGE,
		"BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, "
		"uFlags=0x%x, uAlign=0x%x)",
		pBMContext, uRequestSize, uFlags, uDevVAddrAlignment);

	PVR_ASSERT(ppsMapping != NULL);
	PVR_ASSERT(pBMContext != NULL);

	if (ppsMapping == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter");
		goto fail_exit;
	}

	uSize = HOST_PAGEALIGN(uRequestSize);
	PVR_ASSERT(uSize >= uRequestSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
		       (void **)&pMapping, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			"BM_ImportMemory: failed struct BM_MAPPING alloc");
		goto fail_exit;
	}

	pMapping->hOSMemHandle = NULL;
	pMapping->CpuVAddr = NULL;
	pMapping->DevVAddr.uiAddr = 0;
	pMapping->CpuPAddr.uiAddr = 0;
	pMapping->uSize = uSize;
	pMapping->pBMHeap = pBMHeap;
	pMapping->ui32Flags = uFlags;

	if (pActualSize)
		*pActualSize = uSize;

	/* Dummy allocations are backed by a single data page. */
	if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		uPSize = pBMHeap->sDevArena.ui32DataPageSize;
	else
		uPSize = pMapping->uSize;

	if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
		/* OS-page backing. */
		if (OSAllocPages(pBMHeap->ui32Attribs, uPSize,
				 pBMHeap->sDevArena.ui32DataPageSize,
				 (void **)&pMapping->CpuVAddr,
				 &pMapping->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				"BM_ImportMemory: OSAllocPages(0x%x) failed",
				uPSize);
			goto fail_mapping_alloc;
		}

		pMapping->eCpuMemoryOrigin = hm_env;
	} else if (pBMHeap->ui32Attribs &
		   PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
		struct IMG_SYS_PHYADDR sSysPAddr;

		PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);

		/* Carve the physical span out of the local-memory arena... */
		if (!RA_Alloc(pBMHeap->pLocalDevMemArena, uPSize, NULL, 0,
			      pBMHeap->sDevArena.ui32DataPageSize,
			      (u32 *)&sSysPAddr.uiAddr)) {
			PVR_DPF(PVR_DBG_ERROR,
				"BM_ImportMemory: RA_Alloc(0x%x) FAILED",
				uPSize);
			goto fail_mapping_alloc;
		}

		/* ...then give it a CPU mapping. */
		pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		if (OSReservePhys(pMapping->CpuPAddr, uPSize,
				  pBMHeap->ui32Attribs, &pMapping->CpuVAddr,
				  &pMapping->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				"BM_ImportMemory: OSReservePhys failed");
			goto fail_dev_mem_alloc;
		}
		pMapping->eCpuMemoryOrigin = hm_contiguous;
	} else {
		PVR_DPF(PVR_DBG_ERROR,
			"BM_ImportMemory: Invalid backing store type");
		goto fail_mapping_alloc;
	}

	/* Map the new backing into the device MMU. */
	bResult = DevMemoryAlloc(pBMContext, pMapping, uFlags,
				 uDevVAddrAlignment, &pMapping->DevVAddr);
	if (!bResult) {
		PVR_DPF(PVR_DBG_ERROR,
			"BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
			pMapping->uSize);
		goto fail_dev_mem_alloc;
	}

	PVR_ASSERT(uDevVAddrAlignment > 1 ?
		   (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);

	*pBase = pMapping->DevVAddr.uiAddr;
	*ppsMapping = pMapping;

	PVR_DPF(PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE");
	return IMG_TRUE;

fail_dev_mem_alloc:
	/* Undo the CPU-side backing acquired above. */
	if (pMapping->CpuVAddr || pMapping->hOSMemHandle) {
		/* Mirror the size fixups performed by DevMemoryAlloc. */
		if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
			pMapping->uSize /= 2;

		if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
			uPSize = pBMHeap->sDevArena.ui32DataPageSize;
		else
			uPSize = pMapping->uSize;

		if (pBMHeap->ui32Attribs &
		    PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
			OSFreePages(pBMHeap->ui32Attribs, uPSize,
				    (void *)pMapping->CpuVAddr,
				    pMapping->hOSMemHandle);
		} else {
			struct IMG_SYS_PHYADDR sSysPAddr;

			if (pMapping->CpuVAddr)
				OSUnReservePhys(pMapping->CpuVAddr, uPSize,
						pBMHeap->ui32Attribs,
						pMapping->hOSMemHandle);
			sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
			RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
				IMG_FALSE);
		}
	}
fail_mapping_alloc:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
		  pMapping, NULL);
fail_exit:
	return IMG_FALSE;
}
/*
 * DevMemoryAlloc - reserve device virtual address space for pMapping and
 * wire its CPU-side backing into the device MMU (kernel-style variant),
 * dispatching on where the backing came from (contiguous physical, OS
 * pages, or scatter list).
 *
 * pBMContext         : buffer-manager context (supplies the device node).
 * pMapping           : mapping whose backing is to be mapped; DevVAddr is
 *                      filled in on success.
 * uFlags             : PVRSRV_* allocation flags.
 * dev_vaddr_alignment: required device-virtual alignment.
 * pDevVAddr          : out - device virtual address of the mapping.
 *
 * Returns IMG_TRUE on success, IMG_FALSE on MMU allocation failure or an
 * unknown memory origin.
 */
static IMG_BOOL DevMemoryAlloc(struct BM_CONTEXT *pBMContext,
			       struct BM_MAPPING *pMapping, u32 uFlags,
			       u32 dev_vaddr_alignment,
			       struct IMG_DEV_VIRTADDR *pDevVAddr)
{
	struct PVRSRV_DEVICE_NODE *psDeviceNode = pBMContext->psDeviceNode;

	/* Interleaved allocations occupy twice the device virtual range. */
	if (uFlags & PVRSRV_MEM_INTERLEAVED)
		pMapping->uSize *= 2;

	/* Reserve device virtual address space. */
	if (!psDeviceNode->pfnMMUAlloc(pMapping->pBMHeap->pMMUHeap,
				       pMapping->uSize, 0,
				       dev_vaddr_alignment,
				       &(pMapping->DevVAddr))) {
		PVR_DPF(PVR_DBG_ERROR, "DevMemoryAlloc ERROR MMU_Alloc");
		return IMG_FALSE;
	}

#ifdef PDUMP
	{
		u32 ui32PDumpSize = pMapping->uSize;

		/* Dummy allocations are backed by a single data page. */
		if (uFlags & PVRSRV_MEM_DUMMY)
			ui32PDumpSize =
			    pMapping->pBMHeap->sDevArena.ui32DataPageSize;
		PDUMPMALLOCPAGES(pMapping->DevVAddr.uiAddr,
				 pMapping->hOSMemHandle, ui32PDumpSize,
				 (void *)pMapping);
	}
#endif

	/* Map according to where the CPU backing originated. */
	switch (pMapping->eCpuMemoryOrigin) {
	case hm_wrapped:
	case hm_wrapped_virtaddr:
	case hm_contiguous:
		{
			/* Physically contiguous: map from the physical
			 * base. */
			psDeviceNode->pfnMMUMapPages(pMapping->pBMHeap->
								pMMUHeap,
						     pMapping->DevVAddr,
						     SysCpuPAddrToSysPAddr
							 (pMapping->CpuPAddr),
						     pMapping->uSize, uFlags,
						     (void *)pMapping);
			*pDevVAddr = pMapping->DevVAddr;
			break;
		}
	case hm_env:
		{
			/* OS pages: shadow-map via the OS memory handle. */
			psDeviceNode->pfnMMUMapShadow(pMapping->pBMHeap->
								pMMUHeap,
						      pMapping->DevVAddr,
						      pMapping->uSize,
						      pMapping->CpuVAddr,
						      pMapping->hOSMemHandle,
						      pDevVAddr, uFlags,
						      (void *)pMapping);
			break;
		}
	case hm_wrapped_scatter:
	case hm_wrapped_scatter_virtaddr:
		{
			/* Scattered wrap: map from the page-address list. */
			psDeviceNode->pfnMMUMapScatter(pMapping->pBMHeap->
								pMMUHeap,
						       pMapping->DevVAddr,
						       pMapping->psSysAddr,
						       pMapping->uSize,
						       uFlags,
						       (void *)pMapping);
			*pDevVAddr = pMapping->DevVAddr;
			break;
		}
	default:
		PVR_DPF(PVR_DBG_ERROR,
			 "Illegal value %d for pMapping->eCpuMemoryOrigin",
			 pMapping->eCpuMemoryOrigin);
		return IMG_FALSE;
	}

	return IMG_TRUE;
}
/*
 * SysLocateDevices - fill out gsSGXDeviceMap (register bank, size, IRQ)
 * for the TI335x platform, either from static constants or, when
 * PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO is set, from the platform device.
 *
 * psSysData: unused (kept for the common prototype).
 * Returns PVRSRV_OK, or an error from resource discovery / mapping.
 *
 * Fix: Linux struct resource ranges are inclusive of 'end', so the size
 * is end - start + 1 (cf. resource_size()); the previous end - start
 * undersized the register block by one byte.
 */
static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
{
#if defined(NO_HARDWARE)
	PVRSRV_ERROR eError;
	IMG_CPU_PHYADDR sCpuPAddr;
#else
#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
	struct resource *dev_res;
	int dev_irq;
#endif
#endif

	PVR_UNREFERENCED_PARAMETER(psSysData);

	gsSGXDeviceMap.ui32Flags = 0x0;

#if defined(NO_HARDWARE)
	/* No real device: emulate the register bank with zeroed contiguous
	 * memory. */
	gsSGXDeviceMap.ui32RegsSize = SYS_TI335x_SGX_REGS_SIZE;

	eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
					 &gsSGXRegsCPUVAddr,
					 &sCpuPAddr);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
	gsSGXDeviceMap.sRegsSysPBase =
		SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
#if defined(__linux__)
	/* Registers are already CPU-mapped by the allocation above. */
	gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
#else
	gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
#endif
	OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);

	/* No interrupts without hardware. */
	gsSGXDeviceMap.ui32IRQ = 0;
#else
#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
	/* Discover the register bank and IRQ from the platform device. */
	dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
	if (dev_res == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed",
			 __FUNCTION__));
		return PVRSRV_ERROR_INVALID_DEVICE;
	}

	dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
	if (dev_irq < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)",
			 __FUNCTION__, -dev_irq));
		return PVRSRV_ERROR_INVALID_DEVICE;
	}

	gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
	PVR_TRACE(("SGX register base: 0x%lx",
		   (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));

	/* struct resource is inclusive of 'end': size = end - start + 1. */
	gsSGXDeviceMap.ui32RegsSize =
		(unsigned int)(dev_res->end - dev_res->start + 1);
	PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));

	gsSGXDeviceMap.ui32IRQ = dev_irq;
	PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
#else
	/* Static platform description. */
	gsSGXDeviceMap.sRegsSysPBase.uiAddr =
		SYS_TI335x_SGX_REGS_SYS_PHYS_BASE;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
	gsSGXDeviceMap.ui32RegsSize = SYS_TI335x_SGX_REGS_SIZE;
	gsSGXDeviceMap.ui32IRQ = SYS_TI335x_SGX_IRQ;
#endif

#if defined(SGX_OCP_REGS_ENABLED)
	/* Map the registers now so the OCP register block is reachable. */
	gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
					   gsSGXDeviceMap.ui32RegsSize,
					   PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
					   IMG_NULL);
	if (gsSGXRegsCPUVAddr == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	/* Indicate the registers are already mapped. */
	gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
	gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
#endif
#endif

#if defined(PDUMP)
	{
		/* Memory-region name used when pdumping. */
		static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
		gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
	}
#endif

	return PVRSRV_OK;
}
/*!
******************************************************************************

 @Function		SysLocateDevices

 @Description	Specifies devices in the systems memory map

 @Input			psSysData - sys data

 @Return		PVRSRV_ERROR

 Fix: Linux struct resource ranges are inclusive of 'end', so the size is
 end - start + 1 (cf. resource_size()); the previous end - start
 undersized the register block by one byte in the dynamic-resource path.

******************************************************************************/
static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
{
#if defined(NO_HARDWARE)
	PVRSRV_ERROR eError;
	IMG_CPU_PHYADDR sCpuPAddr;
#else
#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
	struct resource *dev_res;
	int dev_irq;
#endif
#endif

	PVR_UNREFERENCED_PARAMETER(psSysData);

	/* SGX Device: */
	gsSGXDeviceMap.ui32Flags = 0x0;

#if defined(NO_HARDWARE)
	/*
	 * For no hardware, allocate some contiguous memory for the
	 * register block.
	 */

	/* Registers */
	gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3630_SGX_REGS_SIZE;

	eError = OSBaseAllocContigMemory(gsSGXDeviceMap.ui32RegsSize,
					 &gsSGXRegsCPUVAddr,
					 &sCpuPAddr);
	if(eError != PVRSRV_OK)
	{
		return eError;
	}
	gsSGXDeviceMap.sRegsCpuPBase = sCpuPAddr;
	gsSGXDeviceMap.sRegsSysPBase =
		SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
#if defined(__linux__)
	/* Indicate the registers are already mapped */
	gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
#else
	/*
	 * FIXME: Could we just use the virtual address returned by
	 * OSBaseAllocContigMemory?
	 */
	gsSGXDeviceMap.pvRegsCpuVBase = IMG_NULL;
#endif

	OSMemSet(gsSGXRegsCPUVAddr, 0, gsSGXDeviceMap.ui32RegsSize);

	/*
	   device interrupt IRQ
	   Note: no interrupts available on no hardware system
	 */
	gsSGXDeviceMap.ui32IRQ = 0;

#else /* defined(NO_HARDWARE) */

#if defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO)
	/* get the resource and IRQ through platform resource API */
	dev_res = platform_get_resource(gpsPVRLDMDev, IORESOURCE_MEM, 0);
	if (dev_res == NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_resource failed",
			 __FUNCTION__));
		return PVRSRV_ERROR_INVALID_DEVICE;
	}

	dev_irq = platform_get_irq(gpsPVRLDMDev, 0);
	if (dev_irq < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "%s: platform_get_irq failed (%d)",
			 __FUNCTION__, -dev_irq));
		return PVRSRV_ERROR_INVALID_DEVICE;
	}

	gsSGXDeviceMap.sRegsSysPBase.uiAddr = dev_res->start;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
	PVR_TRACE(("SGX register base: 0x%lx",
		   (unsigned long)gsSGXDeviceMap.sRegsCpuPBase.uiAddr));

#if defined(SGX544) && defined(SGX_FEATURE_MP)
	/*
	 * Workaround due to HWMOD change: the advertised region is too
	 * small, so fall back to the static size.
	 */
	gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3630_SGX_REGS_SIZE;
#else
	/* struct resource is inclusive of 'end': size = end - start + 1. */
	gsSGXDeviceMap.ui32RegsSize =
		(unsigned int)(dev_res->end - dev_res->start + 1);
#endif
	PVR_TRACE(("SGX register size: %d",gsSGXDeviceMap.ui32RegsSize));

	gsSGXDeviceMap.ui32IRQ = dev_irq;
	PVR_TRACE(("SGX IRQ: %d", gsSGXDeviceMap.ui32IRQ));
#else /* defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) */

	gsSGXDeviceMap.sRegsSysPBase.uiAddr =
		SYS_OMAP3630_SGX_REGS_SYS_PHYS_BASE;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);
	gsSGXDeviceMap.ui32RegsSize = SYS_OMAP3630_SGX_REGS_SIZE;

	gsSGXDeviceMap.ui32IRQ = SYS_OMAP3630_SGX_IRQ;

#endif /* defined(PVR_LINUX_DYNAMIC_SGX_RESOURCE_INFO) */

#if defined(SGX_OCP_REGS_ENABLED)
	gsSGXRegsCPUVAddr = OSMapPhysToLin(gsSGXDeviceMap.sRegsCpuPBase,
					   gsSGXDeviceMap.ui32RegsSize,
					   PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
					   IMG_NULL);

	if (gsSGXRegsCPUVAddr == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Failed to map SGX registers"));
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	/* Indicate the registers are already mapped */
	gsSGXDeviceMap.pvRegsCpuVBase = gsSGXRegsCPUVAddr;
	gpvOCPRegsLinAddr = gsSGXRegsCPUVAddr;
#endif
#endif /* defined(NO_HARDWARE) */

#if defined(PDUMP)
	{
		/* initialise memory region name for pdumping */
		static IMG_CHAR pszPDumpDevName[] = "SGXMEM";
		gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
	}
#endif

	/* add other devices here: */

	return PVRSRV_OK;
}
/***********************************************************************//**
 * Locate and describe our devices on the PCI bus
 *
 * Fills out the device map for all devices we know about and control
 * (SOC registers, SGX, and optionally MSVDX/VXD).
 *
 * Fixes: the SOC register mapping returned by OSMapPhysToLin was never
 * checked (a failed map was silently used); the NO_HARDWARE MSVDX branch
 * referenced psSysSpecData, which is only declared when hardware is
 * present — it now uses &gsSysSpecificData like the SGX branch.
 *
 * @returns PVRSRV_OK for success, or failure code
 **************************************************************************/
static PVRSRV_ERROR SysLocateDevices(SYS_DATA *psSysData)
{
	IMG_UINT32 ui32IRQ = 0;
#if !defined(NO_HARDWARE)
	SYS_SPECIFIC_DATA *psSysSpecData =
		(SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
#else
	PVRSRV_ERROR eError;
#endif

	/************************
	 *      SOC Setup       *
	 ************************/
#if !defined(NO_HARDWARE)
	/* Get the regions from the base address register */
	gsSOCDeviceMap.sRegsSysPBase.uiAddr =
		OSPCIAddrRangeStart(psSysSpecData->hSGXPCI,
				    CEDARVIEW_ADDR_RANGE_INDEX);
	PVR_TRACE(("uiBaseAddr: " SYSPADDR_FMT,
		   gsSOCDeviceMap.sRegsSysPBase.uiAddr));

	/* Convert it to a CPU physical address */
	gsSOCDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSOCDeviceMap.sRegsSysPBase);

	/* And map in the system registers. */
	gsSOCDeviceMap.sRegsCpuVBase =
		OSMapPhysToLin(gsSOCDeviceMap.sRegsCpuPBase,
			       SYS_SOC_REG_SIZE,
			       PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_UNCACHED,
			       IMG_NULL);
	if (gsSOCDeviceMap.sRegsCpuVBase == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,
			 "SysLocateDevices: Failed to map SOC registers"));
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
			      SYS_SPECIFIC_DATA_SOC_REGS_MAPPED);
#endif

	/************************
	 *      SGX Setup       *
	 ************************/
#if !defined(NO_HARDWARE)
	/* SGX registers sit at a fixed offset inside the SOC region. */
	gsSGXDeviceMap.sRegsSysPBase.uiAddr =
		gsSOCDeviceMap.sRegsSysPBase.uiAddr + SYS_SGX_REG_OFFSET;
	gsSGXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsSGXDeviceMap.sRegsSysPBase);

	/* device interrupt IRQ */
	if (OSPCIIRQ(psSysSpecData->hSGXPCI, &ui32IRQ) != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysLocateDevices: Couldn't get IRQ"));
		return PVRSRV_ERROR_INVALID_DEVICE;
	}
	PVR_TRACE(("IRQ: %d", ui32IRQ));
#else /* !defined(NO_HARDWARE) */
	/*
	 * With no hardware, allocate contiguous memory to emulate the
	 * registers.
	 */
	eError = OSBaseAllocContigMemory(SYS_SGX_REG_SIZE,
					 &(gsSGXDeviceMap.pvRegsCpuVBase),
					 &(gsSGXDeviceMap.sRegsCpuPBase));
	if(eError != PVRSRV_OK)
	{
		return eError;
	}

	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
			      SYS_SPECIFIC_DATA_DUMMY_SGX_REGS);

	OSMemSet(gsSGXDeviceMap.pvRegsCpuVBase, 0, SYS_SGX_REG_SIZE);

	gsSGXDeviceMap.sRegsSysPBase =
		SysCpuPAddrToSysPAddr(gsSGXDeviceMap.sRegsCpuPBase);
#endif /* !defined(NO_HARDWARE) */

	/*
	 * Other setup
	 */
	gsSGXDeviceMap.ui32Flags = 0x0;
	gsSGXDeviceMap.ui32RegsSize = SYS_SGX_REG_SIZE;
	gsSGXDeviceMap.ui32IRQ = ui32IRQ;

	/*
	 * Local Device Memory Region is never present
	 */
	gsSGXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
	gsSGXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
	gsSGXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
	gsSGXDeviceMap.ui32LocalMemSize = 0;

#if defined(PDUMP)
	{
		/* initialise memory region name for pdumping */
		static IMG_CHAR pszPDumpDevName[] = SYSTEM_PDUMP_NAME;
		gsSGXDeviceMap.pszPDumpDevName = pszPDumpDevName;
	}
#endif

	/************************
	 *      VXD Setup       *
	 ************************/
#if defined(SUPPORT_MSVDX)
#if !defined(NO_HARDWARE)
	/* MSVDX registers sit at a fixed offset inside the SOC region. */
	gsMSVDXDeviceMap.sRegsSysPBase.uiAddr =
		gsSOCDeviceMap.sRegsSysPBase.uiAddr + SYS_MSVDX_REG_OFFSET;
	gsMSVDXDeviceMap.sRegsCpuPBase =
		SysSysPAddrToCpuPAddr(gsMSVDXDeviceMap.sRegsSysPBase);
#else
	/* No hardware registers */
	eError = OSBaseAllocContigMemory(MSVDX_REG_SIZE,
					 &(gsMSVDXDeviceMap.sRegsCpuVBase),
					 &(gsMSVDXDeviceMap.sRegsCpuPBase));
	if(eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Use the global system-specific data, as in the SGX branch above;
	 * psSysSpecData is not declared when NO_HARDWARE is set. */
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData,
			      SYS_SPECIFIC_DATA_DUMMY_MSVDX_REGS);

	OSMemSet(gsMSVDXDeviceMap.sRegsCpuVBase, 0, MSVDX_REG_SIZE);

	gsMSVDXDeviceMap.sRegsSysPBase =
		SysCpuPAddrToSysPAddr(gsMSVDXDeviceMap.sRegsCpuPBase);
#endif /* NO_HARDWARE */

	/* Common setup */
	gsMSVDXDeviceMap.ui32RegsSize = MSVDX_REG_SIZE;

	/*
	 * No local device memory region
	 */
	gsMSVDXDeviceMap.sLocalMemSysPBase.uiAddr = 0;
	gsMSVDXDeviceMap.sLocalMemDevPBase.uiAddr = 0;
	gsMSVDXDeviceMap.sLocalMemCpuPBase.uiAddr = 0;
	gsMSVDXDeviceMap.ui32LocalMemSize = 0;

	/*
	 * device interrupt IRQ
	 */
	gsMSVDXDeviceMap.ui32IRQ = ui32IRQ;

#if defined(PDUMP)
	{
		/* initialise memory region name for pdumping */
		static IMG_CHAR pszPDumpDevName[] = SYSTEM_PDUMP_NAME;
		gsMSVDXDeviceMap.pszPDumpDevName = pszPDumpDevName;
	}
#endif /* defined(PDUMP) */
#endif /* defined(SUPPORT_MSVDX) */

	PVR_DPF((PVR_DBG_MESSAGE,
		 "SGX registers base physical address: 0x" SYSPADDR_FMT,
		 gsSGXDeviceMap.sRegsSysPBase.uiAddr));
#if defined(SUPPORT_MSVDX)
	PVR_DPF((PVR_DBG_MESSAGE,
		 "VXD registers base physical address: 0x" SYSPADDR_FMT,
		 gsMSVDXDeviceMap.sRegsSysPBase.uiAddr));
#endif

	return PVRSRV_OK;
}