Example #1
IMG_EXPORT PVRSRV_ERROR
PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA	*psPerProc,
							 IMG_UINT32					ui32Flags,
							 IMG_SIZE_T 				uSize,
							 PVRSRV_KERNEL_MEM_INFO 	**ppsKernelMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(PVRSRV_KERNEL_MEM_INFO),
				  (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
				  "Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));

	/* Force a multi-process (shared) mapping type for this allocation. */
	ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
	ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
	psKernelMemInfo->ui32Flags = ui32Flags;
	psKernelMemInfo->uAllocSize = uSize;

	/* Allocate the pages backing the shared buffer. */
	if(OSAllocPages(psKernelMemInfo->ui32Flags,
					psKernelMemInfo->uAllocSize,
					(IMG_UINT32)HOST_PAGESIZE(),
					IMG_NULL,
					0,
					IMG_NULL,
					&psKernelMemInfo->pvLinAddrKM,
					&psKernelMemInfo->sMemBlk.hOSMemHandle)
		!= PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(PVRSRV_KERNEL_MEM_INFO),
				  psKernelMemInfo,
				  0);
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* register with the resman */
	psKernelMemInfo->sMemBlk.hResItem =
				ResManRegisterRes(psPerProc->hResManContext,
								  RESMAN_TYPE_SHARED_MEM_INFO,
								  psKernelMemInfo,
								  0,
								  &FreeSharedSysMemCallBack);

	*ppsKernelMemInfo = psKernelMemInfo;

	return PVRSRV_OK;
}
enum PVRSRV_ERROR PVRSRVAllocSharedSysMemoryKM(
			struct PVRSRV_PER_PROCESS_DATA *psPerProc,
			u32 ui32Flags, u32 ui32Size,
			struct PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
{
	struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		       sizeof(struct PVRSRV_KERNEL_MEM_INFO),
		       (void **) &psKernelMemInfo, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: "
					"Failed to alloc memory for meminfo");
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));

	/* Force a multi-process (shared) mapping type for this allocation. */
	ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
	ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
	psKernelMemInfo->ui32Flags = ui32Flags;
	psKernelMemInfo->ui32AllocSize = ui32Size;

	if (OSAllocPages(psKernelMemInfo->ui32Flags,
			 psKernelMemInfo->ui32AllocSize, HOST_PAGESIZE(),
			 &psKernelMemInfo->pvLinAddrKM,
			 &psKernelMemInfo->sMemBlk.hOSMemHandle) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: "
					"Failed to alloc memory for block");
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  sizeof(struct PVRSRV_KERNEL_MEM_INFO),
			  psKernelMemInfo, NULL);
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* register with the resman */
	psKernelMemInfo->sMemBlk.hResItem = ResManRegisterRes(
					psPerProc->hResManContext,
					RESMAN_TYPE_SHARED_MEM_INFO,
					psKernelMemInfo, 0,
					FreeSharedSysMemCallBack);

	*ppsKernelMemInfo = psKernelMemInfo;

	return PVRSRV_OK;
}
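
The sketch below is a hypothetical caller for the kernel-style variant above, not part of the original example: it allocates one page of shared system memory and zeroes it through the kernel mapping. The helper name, the PVRSRV_HAP_CACHED flag choice, and the error handling are assumptions for illustration only.

static enum PVRSRV_ERROR AllocSharedScratchPage(
			struct PVRSRV_PER_PROCESS_DATA *psPerProc,
			struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
{
	enum PVRSRV_ERROR eError;

	/* One host page of shared (multi-process) system memory. */
	eError = PVRSRVAllocSharedSysMemoryKM(psPerProc, PVRSRV_HAP_CACHED,
					      HOST_PAGESIZE(), ppsMemInfo);
	if (eError != PVRSRV_OK)
		return eError;

	/* The buffer is already mapped for the kernel at pvLinAddrKM. */
	OSMemSet((*ppsMemInfo)->pvLinAddrKM, 0, (*ppsMemInfo)->ui32AllocSize);

	return PVRSRV_OK;
}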
Example #3
static IMG_VOID SysSaveRestoreArenaLiveSegments(IMG_BOOL bSave)
{
	IMG_SIZE_T        uiBufferSize;
	static IMG_PVOID  pvBackupBuffer = IMG_NULL;
	static IMG_HANDLE hBlockAlloc;
	static IMG_UINT32 uiWriteBufferSize = 0;
	PVRSRV_ERROR eError;

	uiBufferSize = 0;


	if (gpsSysData->apsLocalDevMemArena[0] != IMG_NULL)
	{

		/* First pass with a NULL buffer: query the size needed to hold the live segments. */
		if (PVRSRVSaveRestoreLiveSegments((IMG_HANDLE)gpsSysData->apsLocalDevMemArena[0], IMG_NULL, &uiBufferSize, bSave) == PVRSRV_OK)
		{
			if (uiBufferSize)
			{
				if (bSave && pvBackupBuffer == IMG_NULL)
				{
					uiWriteBufferSize = uiBufferSize;

					/* Allocate OS memory in which to store the device's local memory. */
					eError = OSAllocPages(PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_CACHED,
										  uiBufferSize,
										  HOST_PAGESIZE(),
										  IMG_NULL,
										  0,
										  IMG_NULL,
										  &pvBackupBuffer,
										  &hBlockAlloc);
					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,
								"SysSaveRestoreArenaLiveSegments: OSAllocPages(0x%" SIZE_T_FMT_LEN "x) failed:%u", uiBufferSize, eError));
						return;
					}
				}
				else
				{
					PVR_ASSERT (uiWriteBufferSize == uiBufferSize);
				}


				/* Second pass: save to, or restore from, the backup buffer. */
				PVRSRVSaveRestoreLiveSegments((IMG_HANDLE)gpsSysData->apsLocalDevMemArena[0], pvBackupBuffer, &uiBufferSize, bSave);

				if (!bSave && pvBackupBuffer)
				{
					/* Free the OS memory. */
					eError = OSFreePages(PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_CACHED,
										 uiWriteBufferSize,
										 pvBackupBuffer,
										 hBlockAlloc);
					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,	"SysSaveRestoreArenaLiveSegments: OSFreePages(0x%" SIZE_T_FMT_LEN "x) failed:%u",
								uiBufferSize, eError));
					}

					pvBackupBuffer = IMG_NULL;
				}
			}
		}
	}

}
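
A hypothetical usage sketch, not from the driver source: the helper above is static, so it would be driven from within the same system file, for example from pre/post power-transition hooks. The wrapper names below are illustrative only.

static PVRSRV_ERROR ExamplePrePowerOff(IMG_VOID)
{
	/* Copy the live local-memory segments into a kernel backup buffer. */
	SysSaveRestoreArenaLiveSegments(IMG_TRUE);
	return PVRSRV_OK;
}

static PVRSRV_ERROR ExamplePostPowerOn(IMG_VOID)
{
	/* Write the backed-up segments back and free the backup buffer. */
	SysSaveRestoreArenaLiveSegments(IMG_FALSE);
	return PVRSRV_OK;
}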
Example #4
static IMG_BOOL
BM_ImportMemory (IMG_VOID *pH,
			  IMG_SIZE_T uRequestSize,
			  IMG_SIZE_T *pActualSize,
			  BM_MAPPING **ppsMapping,
			  IMG_UINT32 uFlags,
			  IMG_UINTPTR_T *pBase)
{
	BM_MAPPING *pMapping;
	BM_HEAP *pBMHeap = pH;
	BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
	IMG_BOOL bResult;
	IMG_SIZE_T uSize;
	IMG_SIZE_T uPSize;
	IMG_UINT32 uDevVAddrAlignment = 0;

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
			  (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));

	PVR_ASSERT (ppsMapping != IMG_NULL);
	PVR_ASSERT (pBMContext != IMG_NULL);

	if (ppsMapping == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
		goto fail_exit;
	}

	uSize = HOST_PAGEALIGN (uRequestSize);
	PVR_ASSERT (uSize >= uRequestSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof (BM_MAPPING),
						(IMG_PVOID *)&pMapping, IMG_NULL,
						"Buffer Manager Mapping") != PVRSRV_OK)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
		goto fail_exit;
	}

	pMapping->hOSMemHandle = 0;
	pMapping->CpuVAddr = 0;
	pMapping->DevVAddr.uiAddr = 0;
	pMapping->CpuPAddr.uiAddr = 0;
	pMapping->uSize = uSize;
	pMapping->pBMHeap = pBMHeap;
	pMapping->ui32Flags = uFlags;

	
	/* Report the page-aligned size actually imported back to the caller. */
	if (pActualSize)
	{
		*pActualSize = uSize;
	}

	
	/* Dummy allocations only require a single data page of backing. */
	if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
	{
		uPSize = pBMHeap->sDevArena.ui32DataPageSize;
	}
	else
	{
		uPSize = pMapping->uSize;
	}

	

	/* Allocate physical backing according to the heap's backing-store type. */
	if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
	{
		IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

		
		/* Honour any per-allocation cache type override. */
		if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
		{
			ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
			ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
		}

		
		/* Allocate (possibly non-contiguous) pages from the OS. */
		if (OSAllocPages(ui32Attribs,
						 uPSize,
						 pBMHeap->sDevArena.ui32DataPageSize,
						 (IMG_VOID **)&pMapping->CpuVAddr,
						 &pMapping->hOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
					"BM_ImportMemory: OSAllocPages(0x%x) failed",
					uPSize));
			goto fail_mapping_alloc;
		}

		
		/* The backing pages were allocated by the OS environment. */
		pMapping->eCpuMemoryOrigin = hm_env;
	}
	else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
	{
		IMG_SYS_PHYADDR sSysPAddr;
		IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

		
		/* Contiguous local device memory must be managed by an arena. */
		PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);

		
		/* Honour any per-allocation cache type override. */
		if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
		{
			ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
			ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
		}

		/* Allocate a physically contiguous block from the local memory arena. */
		if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
					   uPSize,
					   IMG_NULL,
					   IMG_NULL,
					   0,
					   pBMHeap->sDevArena.ui32DataPageSize,
					   0,
					   (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
			goto fail_mapping_alloc;
		}

		
		/* Convert to a CPU physical address and map the block for CPU access. */
		pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		if(OSReservePhys(pMapping->CpuPAddr,
						 uPSize,
						 ui32Attribs,
						 &pMapping->CpuVAddr,
						 &pMapping->hOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,	"BM_ImportMemory: OSReservePhys failed"));
			goto fail_dev_mem_alloc;
		}

		
		/* The backing comes from a contiguous block of local device memory. */
		pMapping->eCpuMemoryOrigin = hm_contiguous;
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR,	"BM_ImportMemory: Invalid backing store type"));
		goto fail_mapping_alloc;
	}

	
	/* Allocate device virtual address space and map the backing into it. */
	bResult = DevMemoryAlloc (pBMContext,
								pMapping,
								IMG_NULL,
								uFlags,
								uDevVAddrAlignment,
								&pMapping->DevVAddr);
	if (!bResult)
	{
		PVR_DPF((PVR_DBG_ERROR,
				"BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
				pMapping->uSize));
		goto fail_dev_mem_alloc;
	}

	
	
	PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);

	*pBase = pMapping->DevVAddr.uiAddr;
	*ppsMapping = pMapping;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
	return IMG_TRUE;

fail_dev_mem_alloc:
	if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
	{
		
		/* Undo the size doubling applied to interleaved allocations. */
		if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
		{
			pMapping->uSize /= 2;
		}

		if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		{
			uPSize = pBMHeap->sDevArena.ui32DataPageSize;
		}
		else
		{
			uPSize = pMapping->uSize;
		}

		/* Free the backing through the same path that allocated it. */
		if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
		{
			OSFreePages(pBMHeap->ui32Attribs,
						  uPSize,
						  (IMG_VOID *)pMapping->CpuVAddr,
						  pMapping->hOSMemHandle);
		}
		else
		{
			IMG_SYS_PHYADDR sSysPAddr;

			if(pMapping->CpuVAddr)
			{
				OSUnReservePhys(pMapping->CpuVAddr,
								uPSize,
								pBMHeap->ui32Attribs,
								pMapping->hOSMemHandle);
			}
			sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
			RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
		}
	}
fail_mapping_alloc:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
	
fail_exit:
	return IMG_FALSE;
}
enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
			    struct MMU_CONTEXT **ppsMMUContext,
			    struct IMG_DEV_PHYADDR *psPDDevPAddr)
{
	u32 *pui32Tmp;
	u32 i;
	void *pvPDCpuVAddr;
	struct IMG_DEV_PHYADDR sPDDevPAddr;
	struct IMG_CPU_PHYADDR sCpuPAddr;
	struct IMG_SYS_PHYADDR sSysPAddr;
	struct MMU_CONTEXT *psMMUContext;
	void *hPDOSMemHandle;
	struct SYS_DATA *psSysData;
	struct PVRSRV_SGXDEV_INFO *psDevInfo;

	PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");

	if (SysAcquireData(&psSysData) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Initialise: ERROR call to SysAcquireData failed");
		return PVRSRV_ERROR_GENERIC;
	}

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		   sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
			!= PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Initialise: ERROR call to OSAllocMem failed");
		return PVRSRV_ERROR_GENERIC;
	}
	OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));

	psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
	psMMUContext->psDevInfo = psDevInfo;

	psMMUContext->psDeviceNode = psDeviceNode;

	/* No local device memory arena: take the page directory from OS pages. */
	if (psDeviceNode->psLocalDevMemArena == NULL) {
		if (OSAllocPages
		    (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
		     SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
		     &hPDOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
					"ERROR call to OSAllocPages failed");
			goto err1;
		}

		if (pvPDCpuVAddr)
			sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
		else
			sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
		sPDDevPAddr =
		    SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
	} else {
		/* Allocate the page directory from local device memory and map it. */
		if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
			     SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
			     &(sSysPAddr.uiAddr)) != IMG_TRUE) {
			PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
					"ERROR call to RA_Alloc failed");

			goto err1;
		}

		sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		sPDDevPAddr =
		    SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
		pvPDCpuVAddr = (void __force *)
		    OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
				   PVRSRV_HAP_WRITECOMBINE |
				   PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
		if (!pvPDCpuVAddr) {
			PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
					"ERROR failed to map page tables");

			goto err2;
		}
	}

	PDUMPCOMMENT("Alloc page directory");

	PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);

	if (pvPDCpuVAddr) {
		pui32Tmp = (u32 *) pvPDCpuVAddr;
	} else {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Initialise: pvPDCpuVAddr invalid");
		goto err3;
	}

	/* Clear all page directory entries. */
	for (i = 0; i < SGX_MMU_PD_SIZE; i++)
		pui32Tmp[i] = 0;

	PDUMPCOMMENT("Page directory contents");
	PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
		       PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);

	psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
	psMMUContext->sPDDevPAddr = sPDDevPAddr;
	psMMUContext->hPDOSMemHandle = hPDOSMemHandle;

	*ppsMMUContext = psMMUContext;

	*psPDDevPAddr = sPDDevPAddr;

	/* Link the new context into the device's list of MMU contexts. */
	psMMUContext->psNext = (struct MMU_CONTEXT *)
						psDevInfo->pvMMUContextList;
	psDevInfo->pvMMUContextList = (void *) psMMUContext;


	return PVRSRV_OK;
err3:
	if (psDeviceNode->psLocalDevMemArena)
		OSUnMapPhysToLin((void __iomem __force *)pvPDCpuVAddr,
				 SGX_MMU_PAGE_SIZE, PVRSRV_HAP_WRITECOMBINE |
					PVRSRV_HAP_KERNEL_ONLY,
				 hPDOSMemHandle);
err2:
	if (!psDeviceNode->psLocalDevMemArena)
		OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
			    SGX_MMU_PAGE_SIZE, pvPDCpuVAddr, hPDOSMemHandle);
	else
		RA_Free(psDeviceNode->psLocalDevMemArena,
			sSysPAddr.uiAddr, IMG_FALSE);
err1:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
		  psMMUContext, NULL);

	return PVRSRV_ERROR_GENERIC;
}
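
A hypothetical caller sketch, not from the driver source: MMU_Initialise would typically be invoked while bringing up a device node, keeping the returned context and page-directory address for later mapping calls. The helper name and error handling below are illustrative assumptions.

static enum PVRSRV_ERROR ExampleSetupMMU(struct PVRSRV_DEVICE_NODE *psDeviceNode)
{
	struct MMU_CONTEXT *psMMUContext;
	struct IMG_DEV_PHYADDR sPDDevPAddr;

	if (MMU_Initialise(psDeviceNode, &psMMUContext,
			   &sPDDevPAddr) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			"ExampleSetupMMU: MMU_Initialise failed");
		return PVRSRV_ERROR_GENERIC;
	}

	/* psMMUContext and sPDDevPAddr would be stored for later use. */
	return PVRSRV_OK;
}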
static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
				struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
{
	u32 ui32PTPageCount;
	u32 ui32PDIndex;
	u32 i;
	u32 *pui32PDEntry;
	struct MMU_PT_INFO **ppsPTInfoList;
	struct SYS_DATA *psSysData;
	struct IMG_DEV_VIRTADDR sHighDevVAddr;

	PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));

	if (SysAcquireData(&psSysData) != PVRSRV_OK)
		return IMG_FALSE;

	/* Page directory index covering the start of the requested range. */
	ui32PDIndex =
	    DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

	/* Compute the end of the range, rounded up to page-table coverage
	 * and clamped against 32-bit overflow. */
	if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
	    (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {
		sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
	} else {
		sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
					(1 << (SGX_MMU_PAGE_SHIFT +
					       SGX_MMU_PT_SHIFT)) - 1;
	}

	/* Number of page tables needed to span the range. */
	ui32PTPageCount =
	    sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

	ui32PTPageCount -= ui32PDIndex;

	pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
	pui32PDEntry += ui32PDIndex;

	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

	PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
	PDUMPCOMMENT("Page directory mods (page count == %08X)",
		     ui32PTPageCount);

	/* Walk the covered page tables, allocating any that are missing. */
	for (i = 0; i < ui32PTPageCount; i++) {
		if (ppsPTInfoList[i] == NULL) {
			if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				   sizeof(struct MMU_PT_INFO),
				   (void **) &ppsPTInfoList[i], NULL)
					!= PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR,
					"_DeferredAllocPagetables: "
					"ERROR call to OSAllocMem failed");
				return IMG_FALSE;
			}
			OSMemSet(ppsPTInfoList[i], 0,
				 sizeof(struct MMU_PT_INFO));
		}

		/* Allocate and map the page-table page itself if it is missing. */
		if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
		    ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
			struct IMG_CPU_PHYADDR sCpuPAddr;
			struct IMG_DEV_PHYADDR sDevPAddr;

			PVR_ASSERT(pui32PDEntry[i] == 0);

			if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
			    psLocalDevMemArena == NULL) {
				if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
						     PVRSRV_HAP_KERNEL_ONLY,
					     SGX_MMU_PAGE_SIZE,
					     SGX_MMU_PAGE_SIZE,
					     (void **)&ppsPTInfoList[i]->
						PTPageCpuVAddr,
					     &ppsPTInfoList[i]->
						hPTPageOSMemHandle) !=
				    PVRSRV_OK) {
					PVR_DPF(PVR_DBG_ERROR,
					   "_DeferredAllocPagetables: "
					   "ERROR call to OSAllocPages failed");
					return IMG_FALSE;
				}

				if (ppsPTInfoList[i]->PTPageCpuVAddr) {
					sCpuPAddr =
					    OSMapLinToCPUPhys(ppsPTInfoList[i]->
							      PTPageCpuVAddr);
				} else {
					sCpuPAddr =
					    OSMemHandleToCpuPAddr(
						ppsPTInfoList[i]->
							  hPTPageOSMemHandle,
						0);
				}
				sDevPAddr =
				    SysCpuPAddrToDevPAddr
					    (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
			} else {
				struct IMG_SYS_PHYADDR sSysPAddr;

				if (RA_Alloc(pMMUHeap->psDevArena->
				     psDeviceMemoryHeapInfo->psLocalDevMemArena,
				     SGX_MMU_PAGE_SIZE, NULL, 0,
				     SGX_MMU_PAGE_SIZE,
				     &(sSysPAddr.uiAddr)) != IMG_TRUE) {
					PVR_DPF(PVR_DBG_ERROR,
					       "_DeferredAllocPagetables: "
					       "ERROR call to RA_Alloc failed");
					return IMG_FALSE;
				}

				sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
				ppsPTInfoList[i]->PTPageCpuVAddr =
				    (void __force *)
				    OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
						   PVRSRV_HAP_WRITECOMBINE |
						   PVRSRV_HAP_KERNEL_ONLY,
						   &ppsPTInfoList[i]->
						   hPTPageOSMemHandle);
				if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
					PVR_DPF(PVR_DBG_ERROR,
					     "_DeferredAllocPagetables: "
					     "ERROR failed to map page tables");
					return IMG_FALSE;
				}

				sDevPAddr = SysCpuPAddrToDevPAddr
					    (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);

			}


			OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
				 SGX_MMU_PAGE_SIZE);

			PDUMPMALLOCPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
					     PDUMP_PT_UNIQUETAG);

			PDUMPPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
				       SGX_MMU_PAGE_SIZE, IMG_TRUE,
				       PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);

			/* Point the page directory entry (or entries, for
			 * shared heaps) at the new page table. */
			switch (pMMUHeap->psDevArena->DevMemHeapType) {
			case DEVICE_MEMORY_HEAP_SHARED:
			case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
				{
					struct MMU_CONTEXT *psMMUContext =
					    (struct MMU_CONTEXT *)pMMUHeap->
						    psMMUContext->psDevInfo->
							    pvMMUContextList;

					while (psMMUContext) {
						pui32PDEntry =
						    (u32 *)psMMUContext->
								pvPDCpuVAddr;
						pui32PDEntry += ui32PDIndex;

						pui32PDEntry[i] =
						    sDevPAddr.uiAddr |
							SGX_MMU_PDE_VALID;

						PDUMPPAGETABLE
						    ((void *)&pui32PDEntry[i],
						     sizeof(u32), IMG_FALSE,
						     PDUMP_PD_UNIQUETAG,
						     PDUMP_PT_UNIQUETAG);

						psMMUContext =
						    psMMUContext->psNext;
					}
					break;
				}
			case DEVICE_MEMORY_HEAP_PERCONTEXT:
			case DEVICE_MEMORY_HEAP_KERNEL:
				{
					pui32PDEntry[i] = sDevPAddr.uiAddr |
							     SGX_MMU_PDE_VALID;

					PDUMPPAGETABLE((void *)&pui32PDEntry[i],
						       sizeof(u32), IMG_FALSE,
						       PDUMP_PD_UNIQUETAG,
						       PDUMP_PT_UNIQUETAG);

					break;
				}
			default:
				{
					PVR_DPF(PVR_DBG_ERROR,
						"_DeferredAllocPagetables: "
						"ERROR invalid heap type");
					return IMG_FALSE;
				}
			}


			MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
						     psDevInfo);
		} else {
			/* Page table already present; its PDE must be valid. */
			PVR_ASSERT(pui32PDEntry[i] != 0);
		}
	}

	return IMG_TRUE;
}
static IMG_BOOL BM_ImportMemory(void *pH, size_t uRequestSize,
		size_t *pActualSize, struct BM_MAPPING **ppsMapping,
		u32 uFlags, u32 *pBase)
{
	struct BM_MAPPING *pMapping;
	struct BM_HEAP *pBMHeap = pH;
	struct BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
	IMG_BOOL bResult;
	size_t uSize;
	size_t uPSize;
	u32 uDevVAddrAlignment = 0;

	PVR_DPF(PVR_DBG_MESSAGE,
		"BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, "
		"uFlags=0x%x, uAlign=0x%x)",
		 pBMContext, uRequestSize, uFlags, uDevVAddrAlignment);

	PVR_ASSERT(ppsMapping != NULL);
	PVR_ASSERT(pBMContext != NULL);

	if (ppsMapping == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter");
		goto fail_exit;
	}

	uSize = HOST_PAGEALIGN(uRequestSize);
	PVR_ASSERT(uSize >= uRequestSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
		       (void **)&pMapping, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "BM_ImportMemory: failed struct BM_MAPPING alloc");
		goto fail_exit;
	}

	pMapping->hOSMemHandle = NULL;
	pMapping->CpuVAddr = NULL;
	pMapping->DevVAddr.uiAddr = 0;
	pMapping->CpuPAddr.uiAddr = 0;
	pMapping->uSize = uSize;
	pMapping->pBMHeap = pBMHeap;
	pMapping->ui32Flags = uFlags;

	/* Report the page-aligned size actually imported back to the caller. */
	if (pActualSize)
		*pActualSize = uSize;

	/* Dummy allocations only require a single data page of backing. */
	if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		uPSize = pBMHeap->sDevArena.ui32DataPageSize;
	else
		uPSize = pMapping->uSize;

	/* Allocate physical backing according to the heap's backing-store type. */
	if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
		/* Allocate (possibly non-contiguous) pages from the OS. */
		if (OSAllocPages(pBMHeap->ui32Attribs, uPSize,
				 pBMHeap->sDevArena.ui32DataPageSize,
				 (void **)&pMapping->CpuVAddr,
				 &pMapping->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				 "BM_ImportMemory: OSAllocPages(0x%x) failed",
				 uPSize);
			goto fail_mapping_alloc;
		}

		/* The backing pages were allocated by the OS environment. */
		pMapping->eCpuMemoryOrigin = hm_env;
	} else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
		struct IMG_SYS_PHYADDR sSysPAddr;

		PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);

		/* Allocate a physically contiguous block from the local memory arena. */
		if (!RA_Alloc(pBMHeap->pLocalDevMemArena, uPSize, NULL, 0,
			      pBMHeap->sDevArena.ui32DataPageSize,
			      (u32 *)&sSysPAddr.uiAddr)) {
			PVR_DPF(PVR_DBG_ERROR,
				 "BM_ImportMemory: RA_Alloc(0x%x) FAILED",
				 uPSize);
			goto fail_mapping_alloc;
		}

		/* Convert to a CPU physical address and map the block for CPU access. */
		pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		if (OSReservePhys(pMapping->CpuPAddr, uPSize,
				  pBMHeap->ui32Attribs, &pMapping->CpuVAddr,
				  &pMapping->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				 "BM_ImportMemory: OSReservePhys failed");
			goto fail_dev_mem_alloc;
		}

		/* The backing comes from a contiguous block of local device memory. */
		pMapping->eCpuMemoryOrigin = hm_contiguous;
	} else {
		PVR_DPF(PVR_DBG_ERROR,
			 "BM_ImportMemory: Invalid backing store type");
		goto fail_mapping_alloc;
	}

	/* Allocate device virtual address space and map the backing into it. */
	bResult = DevMemoryAlloc(pBMContext, pMapping, uFlags,
				 uDevVAddrAlignment, &pMapping->DevVAddr);
	if (!bResult) {
		PVR_DPF(PVR_DBG_ERROR,
			 "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
			 pMapping->uSize);
		goto fail_dev_mem_alloc;
	}

	PVR_ASSERT(uDevVAddrAlignment > 1 ?
		   (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);

	*pBase = pMapping->DevVAddr.uiAddr;
	*ppsMapping = pMapping;

	PVR_DPF(PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE");
	return IMG_TRUE;

fail_dev_mem_alloc:
	if (pMapping->CpuVAddr || pMapping->hOSMemHandle) {
		/* Undo the size doubling applied to interleaved allocations. */
		if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
			pMapping->uSize /= 2;

		if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
			uPSize = pBMHeap->sDevArena.ui32DataPageSize;
		else
			uPSize = pMapping->uSize;

		if (pBMHeap->ui32Attribs &
			PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
			OSFreePages(pBMHeap->ui32Attribs, uPSize,
				    (void *)pMapping->CpuVAddr,
				    pMapping->hOSMemHandle);
		} else {
			struct IMG_SYS_PHYADDR sSysPAddr;

			if (pMapping->CpuVAddr)
				OSUnReservePhys(pMapping->CpuVAddr, uPSize,
						pBMHeap->ui32Attribs,
						pMapping->hOSMemHandle);
			sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
			RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
				IMG_FALSE);
		}
	}
fail_mapping_alloc:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
		  NULL);
fail_exit:
	return IMG_FALSE;
}
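
A hypothetical sketch, not from the driver source: BM_ImportMemory is static and appears designed as an import callback for the heap's device-virtual arena, but the call below exercises it directly against a BM_HEAP to show the contract of the interface. The helper name, the heap argument, and the zero flags value are assumptions for illustration.

static IMG_BOOL ExampleImportOnePage(struct BM_HEAP *psBMHeap)
{
	struct BM_MAPPING *psMapping;
	size_t uActualSize;
	u32 uDevVAddrBase;

	/* Import one host page with no special allocation flags. */
	if (!BM_ImportMemory(psBMHeap, HOST_PAGESIZE(), &uActualSize,
			     &psMapping, 0, &uDevVAddrBase))
		return IMG_FALSE;

	/* uDevVAddrBase now holds the device-virtual base of the import;
	 * psMapping describes the CPU mapping and backing pages. */
	return IMG_TRUE;
}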