static IMG_BOOL _AllocPageTables(struct MMU_HEAP *pMMUHeap)
{
	PVR_DPF(PVR_DBG_MESSAGE, "_AllocPageTables()");

	PVR_ASSERT(pMMUHeap != NULL);
	PVR_ASSERT(HOST_PAGESIZE() == SGX_MMU_PAGE_SIZE);

	if (pMMUHeap == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "_AllocPageTables: invalid parameter");
		return IMG_FALSE;
	}

	pMMUHeap->ui32PTEntryCount =
	    pMMUHeap->psDevArena->ui32Size >> SGX_MMU_PAGE_SHIFT;

	pMMUHeap->ui32PTBaseIndex =
	    (pMMUHeap->psDevArena->BaseDevVAddr.
	     uiAddr & (SGX_MMU_PD_MASK | SGX_MMU_PT_MASK)) >>
							SGX_MMU_PAGE_SHIFT;

	pMMUHeap->ui32PTPageCount =
	    (pMMUHeap->ui32PTEntryCount + SGX_MMU_PT_SIZE - 1) >>
							SGX_MMU_PT_SHIFT;

	return IMG_TRUE;
}
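/*
 * A stand-alone worked example of the page-table sizing arithmetic used in
 * _AllocPageTables() above. This is a sketch only: the constants are
 * assumptions (a typical SGX MMU layout with 4 KB pages and 1024 PTEs per
 * page table), not values taken from this listing.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT	12u			/* assumed SGX_MMU_PAGE_SHIFT */
#define EXAMPLE_PT_SHIFT	10u			/* assumed SGX_MMU_PT_SHIFT */
#define EXAMPLE_PT_SIZE		(1u << EXAMPLE_PT_SHIFT)

int main(void)
{
	unsigned int ui32HeapSize = 16u * 1024u * 1024u;	/* 16 MB heap */
	unsigned int ui32PTEntryCount = ui32HeapSize >> EXAMPLE_PAGE_SHIFT;
	unsigned int ui32PTPageCount =
		(ui32PTEntryCount + EXAMPLE_PT_SIZE - 1) >> EXAMPLE_PT_SHIFT;

	/* 4096 PTEs spread over 4 page tables for a 16 MB heap */
	printf("%u PTEs in %u page tables\n", ui32PTEntryCount, ui32PTPageCount);
	return 0;
}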
Example #2
IMG_EXPORT PVRSRV_ERROR
PVRSRVAllocSharedSysMemoryKM(PVRSRV_PER_PROCESS_DATA	*psPerProc,
							 IMG_UINT32					ui32Flags,
							 IMG_SIZE_T 				uSize,
							 PVRSRV_KERNEL_MEM_INFO 	**ppsKernelMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(PVRSRV_KERNEL_MEM_INFO),
				  (IMG_VOID **)&psKernelMemInfo, IMG_NULL,
				  "Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for meminfo"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));

	ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
	ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
	psKernelMemInfo->ui32Flags = ui32Flags;
	psKernelMemInfo->uAllocSize = uSize;

	if(OSAllocPages(psKernelMemInfo->ui32Flags,
					psKernelMemInfo->uAllocSize,
					(IMG_UINT32)HOST_PAGESIZE(),
					IMG_NULL,
					0,
					IMG_NULL,
					&psKernelMemInfo->pvLinAddrKM,
					&psKernelMemInfo->sMemBlk.hOSMemHandle)
		!= PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: Failed to alloc memory for block"));
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
				  sizeof(PVRSRV_KERNEL_MEM_INFO),
				  psKernelMemInfo,
				  0);
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	/* register with the resman */
	psKernelMemInfo->sMemBlk.hResItem =
				ResManRegisterRes(psPerProc->hResManContext,
								  RESMAN_TYPE_SHARED_MEM_INFO,
								  psKernelMemInfo,
								  0,
								  &FreeSharedSysMemCallBack);

	*ppsKernelMemInfo = psKernelMemInfo;

	return PVRSRV_OK;
}
Example #3
IMG_VOID PDumpOSCPUVAddrToPhysPages(IMG_HANDLE hOSMemHandle,
		IMG_UINT32 ui32Offset,
		IMG_PUINT8 pui8LinAddr,
		IMG_UINT32 *pui32PageOffset)
{
	if(hOSMemHandle)
	{
		
		IMG_CPU_PHYADDR     sCpuPAddr;

		PVR_UNREFERENCED_PARAMETER(pui8LinAddr);

		sCpuPAddr = OSMemHandleToCpuPAddr(hOSMemHandle, ui32Offset);
		*pui32PageOffset = sCpuPAddr.uiAddr & (HOST_PAGESIZE() - 1);
	}
	else
	{
		PVR_UNREFERENCED_PARAMETER(hOSMemHandle);
		PVR_UNREFERENCED_PARAMETER(ui32Offset);

		*pui32PageOffset = (IMG_UINT32)pui8LinAddr & (HOST_PAGESIZE() - 1);
	}
}
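/*
 * A minimal sketch (not from the original source) of the page-offset
 * calculation used in both branches above, assuming a 4 KB host page size;
 * HOST_PAGESIZE() is platform-defined, so the value here is an assumption.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ui32PageSize = 4096u;		/* assumed HOST_PAGESIZE() */
	uint32_t uiAddr = 0x80012345u;			/* example physical/linear address */
	uint32_t ui32PageOffset = uiAddr & (ui32PageSize - 1);

	assert(ui32PageOffset == 0x345u);		/* low 12 bits of the address */
	return 0;
}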
enum PVRSRV_ERROR PVRSRVAllocSharedSysMemoryKM(
			struct PVRSRV_PER_PROCESS_DATA *psPerProc,
			u32 ui32Flags, u32 ui32Size,
			struct PVRSRV_KERNEL_MEM_INFO **ppsKernelMemInfo)
{
	struct PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		       sizeof(struct PVRSRV_KERNEL_MEM_INFO),
		       (void **) &psKernelMemInfo, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: "
					"Failed to alloc memory for meminfo");
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psKernelMemInfo, 0, sizeof(*psKernelMemInfo));

	ui32Flags &= ~PVRSRV_HAP_MAPTYPE_MASK;
	ui32Flags |= PVRSRV_HAP_MULTI_PROCESS;
	psKernelMemInfo->ui32Flags = ui32Flags;
	psKernelMemInfo->ui32AllocSize = ui32Size;

	if (OSAllocPages(psKernelMemInfo->ui32Flags,
			 psKernelMemInfo->ui32AllocSize, HOST_PAGESIZE(),
			 &psKernelMemInfo->pvLinAddrKM,
			 &psKernelMemInfo->sMemBlk.hOSMemHandle) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVAllocSharedSysMemoryKM: "
					"Failed to alloc memory for block");
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  sizeof(struct PVRSRV_KERNEL_MEM_INFO),
			  psKernelMemInfo, NULL);
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psKernelMemInfo->sMemBlk.hResItem = ResManRegisterRes(
					psPerProc->hResManContext,
					RESMAN_TYPE_SHARED_MEM_INFO,
					psKernelMemInfo, 0,
					FreeSharedSysMemCallBack);

	*ppsKernelMemInfo = psKernelMemInfo;

	return PVRSRV_OK;
}
enum PVRSRV_ERROR PVRSRVWrapExtMemoryKM(void *hDevCookie,
			    struct PVRSRV_PER_PROCESS_DATA *psPerProc,
			    void *hDevMemContext, u32 ui32ByteSize,
			    u32 ui32PageOffset, IMG_BOOL bPhysContig,
			    struct IMG_SYS_PHYADDR *psExtSysPAddr,
			    void *pvLinAddr,
			    struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo)
{
	struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = NULL;
	struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
	u32 ui32HostPageSize = HOST_PAGESIZE();
	void *hDevMemHeap = NULL;
	struct PVRSRV_DEVICE_NODE *psDeviceNode;
	void *hBuffer;
	struct PVRSRV_MEMBLK *psMemBlock;
	IMG_BOOL bBMError;
	struct BM_HEAP *psBMHeap;
	enum PVRSRV_ERROR eError;
	void *pvPageAlignedCPUVAddr;
	struct IMG_SYS_PHYADDR *psIntSysPAddr = NULL;
	void *hOSWrapMem = NULL;
	struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
	int page_count = 0;
	u32 i;

	psDeviceNode = (struct PVRSRV_DEVICE_NODE *)hDevCookie;
	PVR_ASSERT(psDeviceNode != NULL);

	if (!psDeviceNode || (!pvLinAddr && !psExtSysPAddr)) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVWrapExtMemoryKM: invalid parameter");
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if (pvLinAddr) {
		get_page_details((u32)pvLinAddr, ui32ByteSize,
				&ui32PageOffset, &page_count);
		pvPageAlignedCPUVAddr = (void *)((u8 *) pvLinAddr -
					ui32PageOffset);

		if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
			       page_count * sizeof(struct IMG_SYS_PHYADDR),
			       (void **)&psIntSysPAddr, NULL) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: "
					"Failed to alloc memory for block");
			return PVRSRV_ERROR_OUT_OF_MEMORY;
		}

		eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
				       page_count * ui32HostPageSize,
				       psIntSysPAddr, &hOSWrapMem);
		if (eError != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: "
				"Failed to acquire physical page addresses");
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto ErrorExitPhase1;
		}
		psExtSysPAddr = psIntSysPAddr;
		bPhysContig = IMG_FALSE;
	}

	psDevMemoryInfo =
	    &((struct BM_CONTEXT *)hDevMemContext)->psDeviceNode->
							    sDevMemoryInfo;
	psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
	for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
		if (HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) ==
		    psDevMemoryInfo->ui32MappingHeapID) {
			if (psDeviceMemoryHeap[i].DevMemHeapType ==
			    DEVICE_MEMORY_HEAP_PERCONTEXT) {
				hDevMemHeap =
				    BM_CreateHeap(hDevMemContext,
						  &psDeviceMemoryHeap[i]);
			} else {
				hDevMemHeap =
				    psDevMemoryInfo->psDeviceMemoryHeap[i].
				    hDevMemHeap;
			}
			break;
		}
	}

	if (hDevMemHeap == NULL) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVWrapExtMemoryKM: unable to find mapping heap");
		eError = PVRSRV_ERROR_GENERIC;
		goto ErrorExitPhase2;
	}

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		       sizeof(struct PVRSRV_KERNEL_MEM_INFO),
		       (void **) &psMemInfo, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: "
					"Failed to alloc memory for block");
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExitPhase2;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDevMemHeap,
			   ui32ByteSize,
			   ui32PageOffset,
			   bPhysContig,
			   psExtSysPAddr,
			   NULL, &psMemInfo->ui32Flags, &hBuffer);
	if (!bBMError) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVWrapExtMemoryKM: BM_Wrap Failed");
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto ErrorExitPhase3;
	}

	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
	psMemBlock->hOSWrapMem = hOSWrapMem;
	psMemBlock->psIntSysPAddr = psIntSysPAddr;

	psMemBlock->hBuffer = (void *) hBuffer;

	psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->ui32AllocSize = ui32ByteSize;

	psMemInfo->pvSysBackupBuffer = NULL;

	psBMHeap = (struct BM_HEAP *)hDevMemHeap;
	hDevMemContext = (void *) psBMHeap->pBMContext;
	eError = alloc_or_reuse_syncinfo(hDevCookie,
					 hDevMemContext,
					 &psMemInfo->psKernelSyncInfo,
					 psExtSysPAddr);
	if (eError != PVRSRV_OK)
		goto ErrorExitPhase4;

	psMemInfo->ui32RefCount++;

	psMemInfo->sMemBlk.hResItem =
	    ResManRegisterRes(psPerProc->hResManContext,
			      RESMAN_TYPE_DEVICEMEM_WRAP, psMemInfo, 0,
			      UnwrapExtMemoryCallBack);

	*ppsMemInfo = psMemInfo;

	return PVRSRV_OK;

ErrorExitPhase4:
	FreeDeviceMem(psMemInfo);
	psMemInfo = NULL;

ErrorExitPhase3:
	if (psMemInfo) {
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
			  NULL);
	}

ErrorExitPhase2:
	if (hOSWrapMem)
		OSReleasePhysPageAddr(hOSWrapMem);

ErrorExitPhase1:
	if (psIntSysPAddr) {
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  page_count * sizeof(struct IMG_SYS_PHYADDR),
			  psIntSysPAddr, NULL);
	}

	return eError;
}
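/*
 * The wrap path above first splits (pvLinAddr, ui32ByteSize) into a page
 * offset and a page count via get_page_details(), which is not shown in this
 * listing. The helper below is an assumed equivalent, written only to
 * illustrate the arithmetic; it assumes 4 KB host pages.
 */
#include <stdio.h>

static void example_get_page_details(unsigned long ulAddr, unsigned long ulBytes,
				     unsigned long *pulOffset, unsigned long *pulCount)
{
	const unsigned long ulPageSize = 4096ul;	/* assumed HOST_PAGESIZE() */

	*pulOffset = ulAddr & (ulPageSize - 1);
	*pulCount = (*pulOffset + ulBytes + ulPageSize - 1) / ulPageSize;
}

int main(void)
{
	unsigned long ulOffset, ulCount;

	example_get_page_details(0x10000ff0ul, 0x20ul, &ulOffset, &ulCount);
	printf("offset=0x%lx pages=%lu\n", ulOffset, ulCount);	/* offset=0xff0 pages=2 */
	return 0;
}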
enum PVRSRV_ERROR PVRSRVMapDeviceMemoryKM(
		struct PVRSRV_PER_PROCESS_DATA *psPerProc,
			      struct PVRSRV_KERNEL_MEM_INFO *psSrcMemInfo,
			      void *hDstDevMemHeap,
			      struct PVRSRV_KERNEL_MEM_INFO **ppsDstMemInfo)
{
	enum PVRSRV_ERROR eError;
	u32 ui32PageOffset;
	u32 ui32HostPageSize = HOST_PAGESIZE();
	int page_count;
	int i;
	struct IMG_SYS_PHYADDR *psSysPAddr = NULL;
	struct IMG_DEV_PHYADDR sDevPAddr;
	struct BM_BUF *psBuf;
	struct IMG_DEV_VIRTADDR sDevVAddr;
	struct PVRSRV_KERNEL_MEM_INFO *psMemInfo = NULL;
	void *hBuffer;
	struct PVRSRV_MEMBLK *psMemBlock;
	IMG_BOOL bBMError;
	struct PVRSRV_DEVICE_NODE *psDeviceNode;
	void *pvPageAlignedCPUVAddr;
	struct RESMAN_MAP_DEVICE_MEM_DATA *psMapData = NULL;

	if (!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVMapDeviceMemoryKM: invalid parameters");
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	*ppsDstMemInfo = NULL;

	get_page_details((u32)psSrcMemInfo->pvLinAddrKM,
			psSrcMemInfo->ui32AllocSize,
			&ui32PageOffset, &page_count);
	pvPageAlignedCPUVAddr =
	    (void *) ((u8 *) psSrcMemInfo->pvLinAddrKM -
			  ui32PageOffset);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		       page_count * sizeof(struct IMG_SYS_PHYADDR),
		       (void **) &psSysPAddr, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
					"Failed to alloc memory for block");
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psBuf = psSrcMemInfo->sMemBlk.hBuffer;

	psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;

	sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - ui32PageOffset;
	for (i = 0; i < page_count; i++) {
		eError =
		    BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);
		if (eError != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
				"Failed to retrieve page list from device");
			goto ErrorExit;
		}

		psSysPAddr[i] =
		    SysDevPAddrToSysPAddr(psDeviceNode->sDevId.eDeviceType,
					  sDevPAddr);

		sDevVAddr.uiAddr += ui32HostPageSize;
	}

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		       sizeof(struct RESMAN_MAP_DEVICE_MEM_DATA),
		       (void **)&psMapData, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
				"Failed to alloc resman map data");
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExit;
	}

	if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
		       sizeof(struct PVRSRV_KERNEL_MEM_INFO),
		       (void **)&psMemInfo, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceMemoryKM: "
				"Failed to alloc memory for block");
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExit;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));

	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDstDevMemHeap, psSrcMemInfo->ui32AllocSize,
			   ui32PageOffset, bm_is_continuous(psBuf), psSysPAddr,
			   pvPageAlignedCPUVAddr, &psMemInfo->ui32Flags,
			   &hBuffer);

	if (!bBMError) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVMapDeviceMemoryKM: BM_Wrap Failed");
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto ErrorExit;
	}

	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);

	psMemBlock->hBuffer = (void *) hBuffer;

	psMemBlock->psIntSysPAddr = psSysPAddr;

	psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;

	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
	psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;

	psMemInfo->pvSysBackupBuffer = NULL;

	psSrcMemInfo->ui32RefCount++;

	psMapData->psMemInfo = psMemInfo;
	psMapData->psSrcMemInfo = psSrcMemInfo;

	psMemInfo->sMemBlk.hResItem =
	    ResManRegisterRes(psPerProc->hResManContext,
			      RESMAN_TYPE_DEVICEMEM_MAPPING, psMapData, 0,
			      UnmapDeviceMemoryCallBack);

	*ppsDstMemInfo = psMemInfo;

	return PVRSRV_OK;

ErrorExit:

	if (psSysPAddr) {
		/* free the whole page-address array allocated above */
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
			  page_count * sizeof(struct IMG_SYS_PHYADDR),
			  psSysPAddr, NULL);
	}

	if (psMemInfo) {
		OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
			  NULL);
	}

	if (psMapData) {
		OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(struct RESMAN_MAP_DEVICE_MEM_DATA), psMapData,
			  NULL);
	}

	return eError;
}
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
{
	SYS_DATA *psSysData;

	if(!psMiscInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psMiscInfo->ui32StatePresent = 0;

	
	/* do a basic check for uninitialised request flag */
	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
										|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
										|PVRSRV_MISC_INFO_MEMSTATS_PRESENT
										|PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
										|PVRSRV_MISC_INFO_DDKVERSION_PRESENT
										|PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
										|PVRSRV_MISC_INFO_RESET_PRESENT
										|PVRSRV_MISC_INFO_FREEMEM_PRESENT))
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	
	/* return SOC Timer registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
		(psSysData->pvSOCTimerRegisterKM != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
	}
	else
	{
		psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
	}

	
	/* return SOC Clock Gating registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
		(psSysData->pvSOCClockGateRegsBase != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
	}

	
	/* memory stats */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
		(psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		RA_ARENA			**ppArena;
		IMG_CHAR			*pszStr;
		IMG_UINT32			ui32StrLen;
		IMG_INT32			i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;

		
		/* Local backing stores */
		ppArena = &psSysData->apsLocalDevMemArena[0];
		while(*ppArena)
		{
			CHECK_SPACE(ui32StrLen);
			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			RA_GetStats(*ppArena,
							&pszStr,
							&ui32StrLen);
			
			ppArena++;
		}

		
		
		/* triple loop; devices:contexts:heaps */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_MEMSTATS_PRESENT);

		
		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	
	/* Lean version of mem stats: only show free mem on each RA */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)
		&& psMiscInfo->pszMemoryStr)
	{
		IMG_CHAR			*pszStr;
		IMG_UINT32			ui32StrLen;
		IMG_INT32			i32Count;
		
		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;
  
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;

		
		/* triple loop over devices:contexts:heaps */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_FREEMEM_PRESENT);
		
		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
		(psSysData->psGlobalEventObject != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
		psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
	}

	

	/* DDK version and memstats not supported in same call to GetMiscInfo */
	if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
		&& ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
		&& (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		IMG_CHAR	*pszStr;
		IMG_UINT32	ui32StrLen;
		IMG_UINT32 	ui32LenStrPerNum = 12; /* string length per UI32: 10 digits + '.' + '\0' = 12 bytes */
		IMG_INT32	i32Count;
		IMG_INT i;
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;

		
		/* construct DDK string */
		psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
		psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
		psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BRANCH;
		psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		for (i=0; i<4; i++)
		{
			if (ui32StrLen < ui32LenStrPerNum)
			{
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			if (i != 3)
			{
				i32Count = OSSNPrintf(pszStr, 2, ".");
				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			}
		}
	}

	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;

		if(psMiscInfo->sCacheOpCtl.bDeferOp)
		{
			
			/* For now, assume deferred ops are "full" cache ops,
			 * and we don't need (or expect) a meminfo.
			 */
			psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
		}
		else
		{
#if defined (SUPPORT_SID_INTERFACE)
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo = psMiscInfo->sCacheOpCtl.psKernelMemInfo;

			if(!psMiscInfo->sCacheOpCtl.psKernelMemInfo)
#else
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
			PVRSRV_PER_PROCESS_DATA *psPerProc;

			if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo)
#endif
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Ignoring non-deferred cache op with no meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Deferred cache op is pending. It is unlikely you want "
						 "to combine deferred cache ops with immediate ones"));
			}

#if defined (SUPPORT_SID_INTERFACE)
			PVR_DBG_BREAK
#else
			
			psPerProc = PVRSRVFindPerProcessData();

			if(PVRSRVLookupHandle(psPerProc->psHandleBase,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psMiscInfo->sCacheOpCtl.u.psKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
						 "Can't find kernel meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}
#endif

			if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
			{
				if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
			else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
			{
				if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
/* FIXME: Temporary fix needs to be revisited
 * LinuxMemArea struct listing is not registered for memory areas
 * wrapped through PVR2DMemWrap() call. For now, we are doing
 * cache flush/inv by grabbing the physical pages through
 * get_user_pages() for every blt call.
 */
			else if (psMiscInfo->sCacheOpCtl.eCacheOpType ==
						PVRSRV_MISC_INFO_CPUCACHEOP_CUSTOM_FLUSH)
			{
#if defined(CONFIG_OUTER_CACHE) && defined(PVR_NO_FULL_CACHE_OPS)
				if (1)
				{
					IMG_SIZE_T 	uPageOffset, uPageCount;
					IMG_VOID	*pvPageAlignedCPUVAddr;
					IMG_SYS_PHYADDR	 	*psIntSysPAddr = IMG_NULL;
					IMG_HANDLE	hOSWrapMem = IMG_NULL;
					PVRSRV_ERROR eError;
					int i;

					uPageOffset = (IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr & (HOST_PAGESIZE() - 1);
					uPageCount =
						HOST_PAGEALIGN(psMiscInfo->sCacheOpCtl.ui32Length + uPageOffset)/HOST_PAGESIZE();
					pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr - uPageOffset);

					if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						uPageCount * sizeof(IMG_SYS_PHYADDR),
						(IMG_VOID **)&psIntSysPAddr, IMG_NULL,
						"Array of Page Addresses") != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: Failed to alloc memory for block"));
						return PVRSRV_ERROR_OUT_OF_MEMORY;
					}

					eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
										uPageCount * HOST_PAGESIZE(),
										psIntSysPAddr,
										&hOSWrapMem);
					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: Failed to acquire physical page addresses"));
						OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
							uPageCount * sizeof(IMG_SYS_PHYADDR),
							psIntSysPAddr, IMG_NULL);
						return eError;
					}
					for (i = 0; i < uPageCount; i++)
					{
						outer_flush_range(psIntSysPAddr[i].uiAddr, psIntSysPAddr[i].uiAddr + HOST_PAGESIZE() -1);
					}

					OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
						uPageCount * sizeof(IMG_SYS_PHYADDR),
						psIntSysPAddr, IMG_NULL);

					OSReleasePhysPageAddr(hOSWrapMem);

				}
#else
				OSFlushCPUCacheKM();
#endif /* CONFIG_OUTER_CACHE && PVR_NO_FULL_CACHE_OPS*/
			}
			else if (psMiscInfo->sCacheOpCtl.eCacheOpType ==
							PVRSRV_MISC_INFO_CPUCACHEOP_CUSTOM_INV)
			{
#if defined(CONFIG_OUTER_CACHE)
				/* TODO: Need to check full cache invalidation, but
				 * currently it is not exported through
				 * outer_cache interface.
				 */
				if (1)
				{
					IMG_SIZE_T 	uPageOffset, uPageCount;
					IMG_VOID	*pvPageAlignedCPUVAddr;
					IMG_SYS_PHYADDR	 	*psIntSysPAddr = IMG_NULL;
					IMG_HANDLE	hOSWrapMem = IMG_NULL;
					PVRSRV_ERROR eError;
					int i;

					uPageOffset = (IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr & (HOST_PAGESIZE() - 1);
					uPageCount =
						HOST_PAGEALIGN(psMiscInfo->sCacheOpCtl.ui32Length + uPageOffset)/HOST_PAGESIZE();
					pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psMiscInfo->sCacheOpCtl.pvBaseVAddr - uPageOffset);

					if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						uPageCount * sizeof(IMG_SYS_PHYADDR),
						(IMG_VOID **)&psIntSysPAddr, IMG_NULL,
						"Array of Page Addresses") != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: Failed to alloc memory for block"));
						return PVRSRV_ERROR_OUT_OF_MEMORY;
					}

					eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
										uPageCount * HOST_PAGESIZE(),
										psIntSysPAddr,
										&hOSWrapMem);
					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: Failed to acquire physical page addresses"));
						OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
							uPageCount * sizeof(IMG_SYS_PHYADDR),
							psIntSysPAddr, IMG_NULL);
						return eError;
					}
					for (i = 0; i < uPageCount; i++)
					{
						outer_inv_range(psIntSysPAddr[i].uiAddr, psIntSysPAddr[i].uiAddr + HOST_PAGESIZE() -1);
					}

					OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP,
						uPageCount * sizeof(IMG_SYS_PHYADDR),
						psIntSysPAddr, IMG_NULL);

					OSReleasePhysPageAddr(hOSWrapMem);

				}

#endif /* CONFIG_OUTER_CACHE */
			}

		}
	}

#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
	{
		PVR_LOG(("User requested OS reset"));
		OSPanic();
	}
#endif 

	return PVRSRV_OK;
}
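/*
 * A small stand-alone sketch (not part of the original source) of the
 * "MAJ.MIN.BRANCH.BUILD" string produced by the DDK-version loop above;
 * the version numbers used here are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int aui32Version[4] = { 1, 7, 17, 4997 };	/* hypothetical values */
	char szBuf[48];
	int iLen = 0, i;

	for (i = 0; i < 4; i++)
		iLen += snprintf(szBuf + iLen, sizeof(szBuf) - iLen,
				 i ? ".%u" : "%u", aui32Version[i]);

	printf("%s\n", szBuf);	/* prints "1.7.17.4997" */
	return 0;
}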
enum PVRSRV_ERROR PVRSRVMapDeviceClassMemoryKM(
				struct PVRSRV_PER_PROCESS_DATA *psPerProc,
				void *hDevMemContext, void *hDeviceClassBuffer,
				struct PVRSRV_KERNEL_MEM_INFO **ppsMemInfo,
				void **phOSMapInfo)
{
	enum PVRSRV_ERROR eError;
	struct PVRSRV_KERNEL_MEM_INFO *psMemInfo;
	struct PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
	struct IMG_SYS_PHYADDR *psSysPAddr;
	void *pvCPUVAddr, *pvPageAlignedCPUVAddr;
	IMG_BOOL bPhysContig;
	struct BM_CONTEXT *psBMContext;
	struct DEVICE_MEMORY_INFO *psDevMemoryInfo;
	struct DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
	void *hDevMemHeap = NULL;
	u32 ui32ByteSize;
	u32 ui32Offset;
	u32 ui32PageSize = HOST_PAGESIZE();
	void *hBuffer;
	struct PVRSRV_MEMBLK *psMemBlock;
	IMG_BOOL bBMError;
	u32 i;

	if (!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo ||
	    !hDevMemContext) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVMapDeviceClassMemoryKM: invalid parameters");
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psDeviceClassBuffer = (struct PVRSRV_DEVICECLASS_BUFFER *)
							hDeviceClassBuffer;

	eError =
	    psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->
						  hExtDevice,
						  psDeviceClassBuffer->
						  hExtBuffer, &psSysPAddr,
						  &ui32ByteSize,
						  (void __iomem **)&pvCPUVAddr,
						  phOSMapInfo, &bPhysContig);
	if (eError != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceClassMemoryKM: "
					"unable to get buffer address");
		return PVRSRV_ERROR_GENERIC;
	}

	psBMContext = (struct BM_CONTEXT *)psDeviceClassBuffer->hDevMemContext;
	psDevMemoryInfo = &psBMContext->psDeviceNode->sDevMemoryInfo;
	psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
	for (i = 0; i < PVRSRV_MAX_CLIENT_HEAPS; i++) {
		if (HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) ==
		    psDevMemoryInfo->ui32MappingHeapID) {
			if (psDeviceMemoryHeap[i].DevMemHeapType ==
			    DEVICE_MEMORY_HEAP_PERCONTEXT)
				hDevMemHeap =
				    BM_CreateHeap(hDevMemContext,
						  &psDeviceMemoryHeap[i]);
			else
				hDevMemHeap =
				    psDevMemoryInfo->psDeviceMemoryHeap[i].
								    hDevMemHeap;
			break;
		}
	}

	if (hDevMemHeap == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceClassMemoryKM: "
					"unable to find mapping heap");
		return PVRSRV_ERROR_GENERIC;
	}

	ui32Offset = ((u32)pvCPUVAddr) & (ui32PageSize - 1);
	pvPageAlignedCPUVAddr = (void *)((u8 *)pvCPUVAddr - ui32Offset);

	if (OSAllocMem(PVRSRV_PAGEABLE_SELECT,
		       sizeof(struct PVRSRV_KERNEL_MEM_INFO),
		       (void **)&psMemInfo, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "PVRSRVMapDeviceClassMemoryKM: "
					"Failed to alloc memory for block");
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));

	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDevMemHeap, ui32ByteSize, ui32Offset, bPhysContig,
			   psSysPAddr, pvPageAlignedCPUVAddr,
			   &psMemInfo->ui32Flags, &hBuffer);

	if (!bBMError) {
		PVR_DPF(PVR_DBG_ERROR,
			 "PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed");
		OSFreeMem(PVRSRV_PAGEABLE_SELECT,
			  sizeof(struct PVRSRV_KERNEL_MEM_INFO), psMemInfo,
			  NULL);
		return PVRSRV_ERROR_BAD_MAPPING;
	}

	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);

	psMemBlock->hBuffer = (void *) hBuffer;

	psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);

	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->ui32AllocSize = ui32ByteSize;
	psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;

	psMemInfo->pvSysBackupBuffer = NULL;

	psMemInfo->sMemBlk.hResItem =
	    ResManRegisterRes(psPerProc->hResManContext,
			      RESMAN_TYPE_DEVICECLASSMEM_MAPPING, psMemInfo, 0,
			      UnmapDeviceClassMemoryCallBack);

	*ppsMemInfo = psMemInfo;

	return PVRSRV_OK;
}
Example #9
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV _PVRSRVAllocDeviceMemKM(IMG_HANDLE					hDevCookie,
												 PVRSRV_PER_PROCESS_DATA	*psPerProc,
												 IMG_HANDLE					hDevMemHeap,
												 IMG_UINT32					ui32Flags,
												 IMG_SIZE_T					ui32Size,
												 IMG_SIZE_T					ui32Alignment,
												 PVRSRV_KERNEL_MEM_INFO		**ppsMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO	*psMemInfo;
	PVRSRV_ERROR 			eError;
	BM_HEAP					*psBMHeap;
	IMG_HANDLE				hDevMemContext;

	if (!hDevMemHeap ||
		(ui32Size == 0))
	{
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	
	if (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
	{
		 
		if (((ui32Size % HOST_PAGESIZE()) != 0) ||
			((ui32Alignment % HOST_PAGESIZE()) != 0))
		{
			return PVRSRV_ERROR_INVALID_PARAMS;
		}
	}

	eError = AllocDeviceMem(hDevCookie,
							hDevMemHeap,
							ui32Flags,
							ui32Size,
							ui32Alignment,
							&psMemInfo);

	if (eError != PVRSRV_OK)
	{
		return eError;
	}

#if defined(PVRSRV_RESOURCE_PROFILING)
	psBMHeap = (BM_HEAP*)hDevMemHeap;
	if (psBMHeap->pBMContext->psDeviceNode->pfnResProfCB)
		psBMHeap->pBMContext->psDeviceNode->pfnResProfCB(hDevCookie, psMemInfo->ui32AllocSize, IMG_TRUE);
#endif

	if (ui32Flags & PVRSRV_MEM_NO_SYNCOBJ)
	{
		psMemInfo->psKernelSyncInfo = IMG_NULL;
	}
	else
	{
		


		psBMHeap = (BM_HEAP*)hDevMemHeap;
		hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
		eError = PVRSRVAllocSyncInfoKM(hDevCookie,
									   hDevMemContext,
									   &psMemInfo->psKernelSyncInfo);
		if(eError != PVRSRV_OK)
		{
			goto free_mainalloc;
		}
		psMemInfo->psKernelSyncInfo->ui32RefCount++;
#if defined(PVRSRV_RESOURCE_PROFILING)
		if (psBMHeap->pBMContext->psDeviceNode->pfnResProfCB)
			psBMHeap->pBMContext->psDeviceNode->pfnResProfCB(hDevCookie,
						psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize, IMG_TRUE);
#endif
	}

	
	*ppsMemInfo = psMemInfo;

	if (ui32Flags & PVRSRV_MEM_NO_RESMAN)
	{
		psMemInfo->sMemBlk.hResItem = IMG_NULL;
	}
	else
	{
		
		psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
														RESMAN_TYPE_DEVICEMEM_ALLOCATION,
														psMemInfo,
														0,
														&FreeDeviceMemCallBack);
		if (psMemInfo->sMemBlk.hResItem == IMG_NULL)
		{
			
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto free_mainalloc;
		}
	}

	
	psMemInfo->ui32RefCount++;

	psMemInfo->memType = PVRSRV_MEMTYPE_DEVICE;

	
	return (PVRSRV_OK);

free_mainalloc:
#if defined(PVRSRV_RESOURCE_PROFILING)
	/* psKernelSyncInfo may be IMG_NULL here (e.g. PVRSRV_MEM_NO_SYNCOBJ), so guard the dereference */
	if (psBMHeap->pBMContext->psDeviceNode->pfnResProfCB && psMemInfo->psKernelSyncInfo)
		psBMHeap->pBMContext->psDeviceNode->pfnResProfCB(hDevCookie,
					psMemInfo->psKernelSyncInfo->psSyncDataMemInfoKM->ui32AllocSize, IMG_FALSE);
#endif
	FreeDeviceMem(psMemInfo);

	return eError;
}
static IMG_BOOL ZeroBuf(struct BM_BUF *pBuf, struct BM_MAPPING *pMapping,
			u32 ui32Bytes, u32 ui32Flags)
{
	void *pvCpuVAddr;

	if (pBuf->CpuVAddr) {
		OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
	} else if (pMapping->eCpuMemoryOrigin == hm_contiguous ||
		   pMapping->eCpuMemoryOrigin == hm_wrapped) {
		pvCpuVAddr = (void __force *)OSMapPhysToLin(pBuf->CpuPAddr,
					    ui32Bytes,
					    PVRSRV_HAP_KERNEL_ONLY |
					    (ui32Flags &
					       PVRSRV_HAP_CACHETYPE_MASK),
					    NULL);
		if (!pvCpuVAddr) {
			PVR_DPF(PVR_DBG_ERROR, "ZeroBuf: "
				"OSMapPhysToLin for contiguous buffer failed");
			return IMG_FALSE;
		}
		OSMemSet(pvCpuVAddr, 0, ui32Bytes);
		OSUnMapPhysToLin((void __force __iomem *)pvCpuVAddr, ui32Bytes,
				 PVRSRV_HAP_KERNEL_ONLY |
				    (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
				 NULL);
	} else {
		u32 ui32BytesRemaining = ui32Bytes;
		u32 ui32CurrentOffset = 0;
		struct IMG_CPU_PHYADDR CpuPAddr;

		PVR_ASSERT(pBuf->hOSMemHandle);

		while (ui32BytesRemaining > 0) {
			u32 ui32BlockBytes =
			    MIN(ui32BytesRemaining, HOST_PAGESIZE());
			CpuPAddr =
			    OSMemHandleToCpuPAddr(pBuf->hOSMemHandle,
						  ui32CurrentOffset);

			if (CpuPAddr.uiAddr & (HOST_PAGESIZE() - 1))
				ui32BlockBytes =
				    MIN(ui32BytesRemaining,
					HOST_PAGEALIGN(CpuPAddr.uiAddr) -
					CpuPAddr.uiAddr);

			pvCpuVAddr = (void __force *)OSMapPhysToLin(CpuPAddr,
						    ui32BlockBytes,
						    PVRSRV_HAP_KERNEL_ONLY |
						     (ui32Flags &
						     PVRSRV_HAP_CACHETYPE_MASK),
						    NULL);
			if (!pvCpuVAddr) {
				PVR_DPF(PVR_DBG_ERROR, "ZeroBuf: "
					"OSMapPhysToLin while "
				       "zeroing non-contiguous memory FAILED");
				return IMG_FALSE;
			}
			OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
			OSUnMapPhysToLin((void __force __iomem *)pvCpuVAddr,
					 ui32BlockBytes,
					 PVRSRV_HAP_KERNEL_ONLY |
					   (ui32Flags &
					    PVRSRV_HAP_CACHETYPE_MASK),
					 NULL);

			ui32BytesRemaining -= ui32BlockBytes;
			ui32CurrentOffset += ui32BlockBytes;
		}
	}

	return IMG_TRUE;
}
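/*
 * The non-contiguous branch of ZeroBuf() above walks the buffer in chunks of
 * at most one page, shrinking a chunk when the current physical address is
 * not page aligned. A stand-alone sketch of that clamp, assuming 4 KB pages
 * (an assumption; HOST_PAGESIZE()/HOST_PAGEALIGN() are platform-defined):
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096ul
#define EXAMPLE_PAGE_ALIGN(a)	(((a) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))
#define EXAMPLE_MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long ulBytesRemaining = 10000ul;	/* bytes left to zero */
	unsigned long ulCpuPAddr = 0x80001e00ul;	/* current chunk's physical address */
	unsigned long ulBlockBytes = EXAMPLE_MIN(ulBytesRemaining, EXAMPLE_PAGE_SIZE);

	if (ulCpuPAddr & (EXAMPLE_PAGE_SIZE - 1))	/* not page aligned: stop at the page end */
		ulBlockBytes = EXAMPLE_MIN(ulBytesRemaining,
					   EXAMPLE_PAGE_ALIGN(ulCpuPAddr) - ulCpuPAddr);

	printf("zero %lu bytes this iteration\n", ulBlockBytes);	/* 512 */
	return 0;
}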
Example #11
/*!
******************************************************************************

 @Function	PVRSRVGetMiscInfoKM

 @Description
	Retrieves misc. info.

 @Output PVRSRV_MISC_INFO

 @Return   PVRSRV_ERROR :

******************************************************************************/
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVGetMiscInfoKM(PVRSRV_MISC_INFO *psMiscInfo)
{
	SYS_DATA *psSysData;

	if(!psMiscInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psMiscInfo->ui32StatePresent = 0;

	/* do a basic check for uninitialised request flag */
	if(psMiscInfo->ui32StateRequest & ~(PVRSRV_MISC_INFO_TIMER_PRESENT
										|PVRSRV_MISC_INFO_CLOCKGATE_PRESENT
										|PVRSRV_MISC_INFO_MEMSTATS_PRESENT
										|PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT
										|PVRSRV_MISC_INFO_DDKVERSION_PRESENT
										|PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT
										|PVRSRV_MISC_INFO_RESET_PRESENT
										|PVRSRV_MISC_INFO_FREEMEM_PRESENT
										|PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT
										|PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT
										|PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT))
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVGetMiscInfoKM: invalid state request flags"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	SysAcquireData(&psSysData);

	/* return SOC Timer registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_TIMER_PRESENT) != 0UL) &&
		(psSysData->pvSOCTimerRegisterKM != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_TIMER_PRESENT;
		psMiscInfo->pvSOCTimerRegisterKM = psSysData->pvSOCTimerRegisterKM;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = psSysData->hSOCTimerRegisterOSMemHandle;
	}
	else
	{
		psMiscInfo->pvSOCTimerRegisterKM = IMG_NULL;
		psMiscInfo->hSOCTimerRegisterOSMemHandle = IMG_NULL;
	}

	/* return SOC Clock Gating registers */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CLOCKGATE_PRESENT) != 0UL) &&
		(psSysData->pvSOCClockGateRegsBase != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CLOCKGATE_PRESENT;
		psMiscInfo->pvSOCClockGateRegs = psSysData->pvSOCClockGateRegsBase;
		psMiscInfo->ui32SOCClockGateRegsSize = psSysData->ui32SOCClockGateRegsSize;
	}

	/* memory stats */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) != 0UL) &&
		(psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		RA_ARENA			**ppArena;
/*		BM_HEAP				*psBMHeap;
		BM_CONTEXT			*psBMContext;
		PVRSRV_DEVICE_NODE	*psDeviceNode;*/
		IMG_CHAR			*pszStr;
		IMG_UINT32			ui32StrLen;
		IMG_INT32			i32Count;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_MEMSTATS_PRESENT;

		/* Local backing stores */
		ppArena = &psSysData->apsLocalDevMemArena[0];
		while(*ppArena)
		{
			CHECK_SPACE(ui32StrLen);
			i32Count = OSSNPrintf(pszStr, 100, "\nLocal Backing Store:\n");
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);

			RA_GetStats(*ppArena,
							&pszStr,
							&ui32StrLen);
			/* advance through the array */
			ppArena++;
		}

		/* per device */
/*		psDeviceNode = psSysData->psDeviceNodeList;*/

		/*triple loop; devices:contexts:heaps*/
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_MEMSTATS_PRESENT);

		/* attach a new line and string terminate */
		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	/* Lean version of mem stats: only show free mem on each RA */
	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FREEMEM_PRESENT) != 0)
		&& psMiscInfo->pszMemoryStr)
	{
		IMG_CHAR			*pszStr;
		IMG_UINT32			ui32StrLen;
		IMG_INT32			i32Count;
		
		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;
  
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FREEMEM_PRESENT;

		/* triple loop over devices:contexts:heaps */
		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any_va(psSysData->psDeviceNodeList,
													&PVRSRVGetMiscInfoKM_Device_AnyVaCb,
													&ui32StrLen,
													&i32Count,
													&pszStr,
													PVRSRV_MISC_INFO_FREEMEM_PRESENT);
		
		i32Count = OSSNPrintf(pszStr, 100, "\n");
		UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
	}

	if(((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT) != 0UL) &&
		(psSysData->psGlobalEventObject != IMG_NULL))
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GLOBALEVENTOBJECT_PRESENT;
		psMiscInfo->sGlobalEventObject = *psSysData->psGlobalEventObject;
	}

	/* DDK version and memstats not supported in same call to GetMiscInfo */

	if (((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_DDKVERSION_PRESENT) != 0UL)
		&& ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_MEMSTATS_PRESENT) == 0UL)
		&& (psMiscInfo->pszMemoryStr != IMG_NULL))
	{
		IMG_CHAR	*pszStr;
		IMG_UINT32	ui32StrLen;
		IMG_UINT32 	ui32LenStrPerNum = 12; /* string length per UI32: 10 digits + '.' + '\0' = 12 bytes */
		IMG_INT32	i32Count;
		IMG_INT i;
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_DDKVERSION_PRESENT;

		/* construct DDK string */
		psMiscInfo->aui32DDKVersion[0] = PVRVERSION_MAJ;
		psMiscInfo->aui32DDKVersion[1] = PVRVERSION_MIN;
		psMiscInfo->aui32DDKVersion[2] = PVRVERSION_BUILD_HI;
		psMiscInfo->aui32DDKVersion[3] = PVRVERSION_BUILD_LO;

		pszStr = psMiscInfo->pszMemoryStr;
		ui32StrLen = psMiscInfo->ui32MemoryStrLen;

		for (i=0; i<4; i++)
		{
			if (ui32StrLen < ui32LenStrPerNum)
			{
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			i32Count = OSSNPrintf(pszStr, ui32LenStrPerNum, "%u", psMiscInfo->aui32DDKVersion[i]);
			UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			if (i != 3)
			{
				i32Count = OSSNPrintf(pszStr, 2, ".");
				UPDATE_SPACE(pszStr, i32Count, ui32StrLen);
			}
		}
	}

	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT) != 0UL)
	{
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_CPUCACHEOP_PRESENT;

		if(psMiscInfo->sCacheOpCtl.bDeferOp)
		{
			/* For now, assume deferred ops are "full" cache ops,
			 * and we don't need (or expect) a meminfo.
			 */
			psSysData->ePendingCacheOpType = psMiscInfo->sCacheOpCtl.eCacheOpType;
		}
		else
		{
			PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
			PVRSRV_PER_PROCESS_DATA *psPerProc;

			if(!psMiscInfo->sCacheOpCtl.u.psKernelMemInfo)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Ignoring non-deferred cache op with no meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psSysData->ePendingCacheOpType != PVRSRV_MISC_INFO_CPUCACHEOP_NONE)
			{
				PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetMiscInfoKM: "
						 "Deferred cache op is pending. It is unlikely you want "
						 "to combine deferred cache ops with immediate ones"));
			}

			psPerProc = PVRSRVFindPerProcessData();

			if(PVRSRVLookupHandle(psPerProc->psHandleBase,
								  (IMG_PVOID *)&psKernelMemInfo,
								  psMiscInfo->sCacheOpCtl.u.psKernelMemInfo,
								  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
						 "Can't find kernel meminfo"));
				return PVRSRV_ERROR_INVALID_PARAMS;
			}

			if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_FLUSH)
			{
				if(!OSFlushCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
										   0,
										   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
										   psMiscInfo->sCacheOpCtl.ui32Length))
				{
					return PVRSRV_ERROR_CACHEOP_FAILED;
				}
			}
			else if(psMiscInfo->sCacheOpCtl.eCacheOpType == PVRSRV_MISC_INFO_CPUCACHEOP_CLEAN)
			{
				if(psMiscInfo->sCacheOpCtl.ui32Length!=0)
				{
					if(!OSCleanCPUCacheRangeKM(psKernelMemInfo->sMemBlk.hOSMemHandle,
											   0,
											   psMiscInfo->sCacheOpCtl.pvBaseVAddr,
											   psMiscInfo->sCacheOpCtl.ui32Length))
					{
						return PVRSRV_ERROR_CACHEOP_FAILED;
					}
				}
			}
		}
	}

	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT) != 0UL)
	{
		PVRSRV_KERNEL_MEM_INFO *psKernelMemInfo;
		PVRSRV_PER_PROCESS_DATA *psPerProc;

		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_REF_COUNT_PRESENT;

		psPerProc = PVRSRVFindPerProcessData();

		if(PVRSRVLookupHandle(psPerProc->psHandleBase,
							  (IMG_PVOID *)&psKernelMemInfo,
							  psMiscInfo->sGetRefCountCtl.u.psKernelMemInfo,
							  PVRSRV_HANDLE_TYPE_MEM_INFO) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetMiscInfoKM: "
									"Can't find kernel meminfo"));
			return PVRSRV_ERROR_INVALID_PARAMS;
		}

		psMiscInfo->sGetRefCountCtl.ui32RefCount = psKernelMemInfo->ui32RefCount;
	}

	if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT) != 0UL)
	{
		psMiscInfo->ui32PageSize = HOST_PAGESIZE();
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT;
	}

#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
	if((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_RESET_PRESENT) != 0UL)
	{
		PVR_LOG(("User requested OS reset"));
		OSPanic();
	}
#endif /* #if defined(PVRSRV_RESET_ON_HWTIMEOUT) */

#if defined(SUPPORT_PVRSRV_DEVICE_CLASS)
	if ((psMiscInfo->ui32StateRequest & PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT) != 0UL)
	{
		PVRSRVProcessQueues(IMG_TRUE);
		psMiscInfo->ui32StatePresent |= PVRSRV_MISC_INFO_FORCE_SWAP_TO_SYSTEM_PRESENT;
	}
#endif /* defined(SUPPORT_PVRSRV_DEVICE_CLASS) */

	return PVRSRV_OK;
}
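/*
 * A hypothetical kernel-side caller of PVRSRVGetMiscInfoKM() above, shown
 * only to illustrate the request/present flag handshake. It requests just
 * the host page size; the field and flag names match the function above,
 * but this call site does not appear in the original source and assumes the
 * usual services headers are available.
 */
static IMG_VOID ExampleQueryPageSize(IMG_VOID)
{
	PVRSRV_MISC_INFO sMiscInfo;

	OSMemSet(&sMiscInfo, 0, sizeof(sMiscInfo));
	sMiscInfo.ui32StateRequest = PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT;

	if ((PVRSRVGetMiscInfoKM(&sMiscInfo) == PVRSRV_OK) &&
		((sMiscInfo.ui32StatePresent & PVRSRV_MISC_INFO_GET_PAGE_SIZE_PRESENT) != 0UL))
	{
		PVR_DPF((PVR_DBG_MESSAGE, "Host page size: %u", sMiscInfo.ui32PageSize));
	}
}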
Example #12
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceMemoryKM(PVRSRV_PER_PROCESS_DATA	*psPerProc,
												  PVRSRV_KERNEL_MEM_INFO	*psSrcMemInfo,
												  IMG_HANDLE				hDstDevMemHeap,
												  PVRSRV_KERNEL_MEM_INFO	**ppsDstMemInfo)
{
	PVRSRV_ERROR				eError;
	IMG_UINT32					i;
	IMG_SIZE_T					ui32PageCount, ui32PageOffset;
	IMG_SIZE_T					ui32HostPageSize = HOST_PAGESIZE();
	IMG_SYS_PHYADDR				*psSysPAddr = IMG_NULL;
	IMG_DEV_PHYADDR				sDevPAddr;
	BM_BUF						*psBuf;
	IMG_DEV_VIRTADDR			sDevVAddr;
	PVRSRV_KERNEL_MEM_INFO		*psMemInfo = IMG_NULL;
	BM_HANDLE 					hBuffer;
	PVRSRV_MEMBLK				*psMemBlock;
	IMG_BOOL					bBMError;
	PVRSRV_DEVICE_NODE			*psDeviceNode;
	IMG_VOID 					*pvPageAlignedCPUVAddr;
	RESMAN_MAP_DEVICE_MEM_DATA	*psMapData = IMG_NULL;

	
	if(!psSrcMemInfo || !hDstDevMemHeap || !ppsDstMemInfo)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	
	*ppsDstMemInfo = IMG_NULL;

	ui32PageOffset = psSrcMemInfo->sDevVAddr.uiAddr & (ui32HostPageSize - 1);
	ui32PageCount = HOST_PAGEALIGN(psSrcMemInfo->ui32AllocSize + ui32PageOffset) / ui32HostPageSize;
	pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)psSrcMemInfo->pvLinAddrKM - ui32PageOffset);

	



	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					ui32PageCount*sizeof(IMG_SYS_PHYADDR),
					(IMG_VOID **)&psSysPAddr, IMG_NULL,
					"Array of Page Addresses") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	psBuf = psSrcMemInfo->sMemBlk.hBuffer;

	
	psDeviceNode = psBuf->pMapping->pBMHeap->pBMContext->psDeviceNode;

	
	sDevVAddr.uiAddr = psSrcMemInfo->sDevVAddr.uiAddr - IMG_CAST_TO_DEVVADDR_UINT(ui32PageOffset);
	for(i=0; i<ui32PageCount; i++)
	{
		BM_GetPhysPageAddr(psSrcMemInfo, sDevVAddr, &sDevPAddr);

		
		psSysPAddr[i] = SysDevPAddrToSysPAddr (psDeviceNode->sDevId.eDeviceType, sDevPAddr);

		
		sDevVAddr.uiAddr += IMG_CAST_TO_DEVVADDR_UINT(ui32HostPageSize);
	}

	
	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					sizeof(RESMAN_MAP_DEVICE_MEM_DATA),
					(IMG_VOID **)&psMapData, IMG_NULL,
					"Resource Manager Map Data") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc resman map data"));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExit;
	}


	if(OSAllocMem(PVRSRV_PAGEABLE_SELECT,
					sizeof(PVRSRV_KERNEL_MEM_INFO),
					(IMG_VOID **)&psMemInfo, IMG_NULL,
					"Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: Failed to alloc memory for block"));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExit;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
	psMemInfo->ui32Flags = psSrcMemInfo->ui32Flags;

	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDstDevMemHeap,
					   psSrcMemInfo->ui32AllocSize,
					   ui32PageOffset,
					   IMG_FALSE,
					   psSysPAddr,
					   pvPageAlignedCPUVAddr,
					   &psMemInfo->ui32Flags,
					   &hBuffer);

	if (!bBMError)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceMemoryKM: BM_Wrap Failed"));
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto ErrorExit;
	}

	
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);

	
	psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;

	
	psMemBlock->psIntSysPAddr = psSysPAddr;

	
	psMemInfo->pvLinAddrKM = psSrcMemInfo->pvLinAddrKM;

	
	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->ui32AllocSize = psSrcMemInfo->ui32AllocSize;
	psMemInfo->psKernelSyncInfo = psSrcMemInfo->psKernelSyncInfo;

	
	if( psMemInfo->psKernelSyncInfo )
		psMemInfo->psKernelSyncInfo->ui32RefCount++;

	

	psMemInfo->pvSysBackupBuffer = IMG_NULL;

	
	psMemInfo->ui32RefCount++;

	
	psSrcMemInfo->ui32RefCount++;

	
	BM_Export(psSrcMemInfo->sMemBlk.hBuffer);

	psMemInfo->memType = PVRSRV_MEMTYPE_MAPPED;

	
	psMapData->psMemInfo = psMemInfo;
	psMapData->psSrcMemInfo = psSrcMemInfo;

	
	psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
													RESMAN_TYPE_DEVICEMEM_MAPPING,
													psMapData,
													0,
													&UnmapDeviceMemoryCallBack);

	*ppsDstMemInfo = psMemInfo;

	return PVRSRV_OK;

	

ErrorExit:

	if(psSysPAddr)
	{
		/* free the whole page-address array allocated above */
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageCount * sizeof(IMG_SYS_PHYADDR), psSysPAddr, IMG_NULL);
	}

	if(psMemInfo)
	{
		
		OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
		
	}

	if(psMapData)
	{
		
		OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(RESMAN_MAP_DEVICE_MEM_DATA), psMapData, IMG_NULL);
		
	}

	return eError;
}
Example #13
IMG_HANDLE
BM_CreateHeap (IMG_HANDLE hBMContext,
			   DEVICE_MEMORY_HEAP_INFO *psDevMemHeapInfo)
{
	BM_CONTEXT *pBMContext = (BM_CONTEXT*)hBMContext;
	PVRSRV_DEVICE_NODE *psDeviceNode;
	BM_HEAP *psBMHeap;

	PVR_DPF((PVR_DBG_MESSAGE, "BM_CreateHeap"));

	if(!pBMContext)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: BM_CONTEXT null"));
		return IMG_NULL;
	}

	psDeviceNode = pBMContext->psDeviceNode;

	




	if(pBMContext->ui32RefCount > 0)
	{
		psBMHeap = (BM_HEAP*)List_BM_HEAP_Any_va(pBMContext->psBMHeap,
												 &BM_CreateHeap_AnyVaCb,
												 psDevMemHeapInfo);

		if (psBMHeap)
		{
			return psBMHeap;
		}
	}


	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof (BM_HEAP),
						(IMG_PVOID *)&psBMHeap, IMG_NULL,
						"Buffer Manager Heap") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: Alloc failed"));
		return IMG_NULL;
	}

	OSMemSet (psBMHeap, 0, sizeof (BM_HEAP));

	psBMHeap->sDevArena.ui32HeapID = psDevMemHeapInfo->ui32HeapID;
	psBMHeap->sDevArena.pszName = psDevMemHeapInfo->pszName;
	psBMHeap->sDevArena.BaseDevVAddr = psDevMemHeapInfo->sDevVAddrBase;
	psBMHeap->sDevArena.ui32Size = psDevMemHeapInfo->ui32HeapSize;
	psBMHeap->sDevArena.DevMemHeapType = psDevMemHeapInfo->DevMemHeapType;
	psBMHeap->sDevArena.ui32DataPageSize = psDevMemHeapInfo->ui32DataPageSize;
	psBMHeap->sDevArena.psDeviceMemoryHeapInfo = psDevMemHeapInfo;
	psBMHeap->ui32Attribs = psDevMemHeapInfo->ui32Attribs;

	
	psBMHeap->pBMContext = pBMContext;

	psBMHeap->pMMUHeap = psDeviceNode->pfnMMUCreate (pBMContext->psMMUContext,
													&psBMHeap->sDevArena,
													&psBMHeap->pVMArena,
													&psBMHeap->psMMUAttrib);
	if (!psBMHeap->pMMUHeap)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: MMUCreate failed"));
		goto ErrorExit;
	}

	
	psBMHeap->pImportArena = RA_Create (psDevMemHeapInfo->pszBSName,
										0, 0, IMG_NULL,
										MAX(HOST_PAGESIZE(), psBMHeap->sDevArena.ui32DataPageSize),
										&BM_ImportMemory,
										&BM_FreeMemory,
										IMG_NULL,
										psBMHeap);
	if(psBMHeap->pImportArena == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: RA_Create failed"));
		goto ErrorExit;
	}

	if(psBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
	{
		



		psBMHeap->pLocalDevMemArena = psDevMemHeapInfo->psLocalDevMemArena;
		if(psBMHeap->pLocalDevMemArena == IMG_NULL)
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_CreateHeap: LocalDevMemArena null"));
			goto ErrorExit;
		}
	}

	
	List_BM_HEAP_Insert(&pBMContext->psBMHeap, psBMHeap);

	return (IMG_HANDLE)psBMHeap;

	
ErrorExit:

	
	if (psBMHeap->pMMUHeap != IMG_NULL)
	{
		psDeviceNode->pfnMMUDelete (psBMHeap->pMMUHeap);
		psDeviceNode->pfnMMUFinalise (pBMContext->psMMUContext);
	}

	
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_HEAP), psBMHeap, IMG_NULL);
	

	return IMG_NULL;
}
Example #14
enum PVRSRV_ERROR PDumpMem2KM(enum PVRSRV_DEVICE_TYPE eDeviceType,
			 void *pvLinAddr, u32 ui32Bytes, u32 ui32Flags,
			 IMG_BOOL bInitialisePages, void *hUniqueTag1,
			 void *hUniqueTag2)
{
	u32 ui32NumPages;
	u32 ui32PageOffset;
	u32 ui32BlockBytes;
	u8 *pui8LinAddr;
	struct IMG_DEV_PHYADDR sDevPAddr;
	struct IMG_CPU_PHYADDR sCpuPAddr;
	u32 ui32Offset;
	u32 ui32ParamOutPos;

	__PDBG_PDUMP_STATE_GET_SCRIPT_AND_FILE_STRING(PVRSRV_ERROR_GENERIC);

	PVR_UNREFERENCED_PARAMETER(ui32Flags);

	if (!pvLinAddr)
		return PVRSRV_ERROR_GENERIC;

	if (gui32PDumpSuspended)
		return PVRSRV_OK;

	ui32ParamOutPos =
	    gpfnDbgDrv->pfnGetStreamOffset(gsDBGPdumpState.
					   psStream[PDUMP_STREAM_PARAM2]);

	if (bInitialisePages) {

		if (!PDumpWriteILock
		    (gsDBGPdumpState.psStream[PDUMP_STREAM_PARAM2], pvLinAddr,
		     ui32Bytes, PDUMP_FLAGS_CONTINUOUS))
			return PVRSRV_ERROR_GENERIC;

		if (gsDBGPdumpState.ui32ParamFileNum == 0)
			snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%.prm");
		else
			snprintf(pszFile, SZ_FILENAME_SIZE_MAX, "%%0%%%lu.prm",
				 gsDBGPdumpState.ui32ParamFileNum);
	}

	ui32PageOffset = (u32) pvLinAddr & (HOST_PAGESIZE() - 1);
	ui32NumPages =
	    (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() -
	     1) / HOST_PAGESIZE();
	pui8LinAddr = (u8 *) pvLinAddr;

	while (ui32NumPages--) {
		sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
		sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);

		if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE())

			ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
		else

			ui32BlockBytes = ui32Bytes;

		if (bInitialisePages) {
			snprintf(pszScript,
				 SZ_SCRIPT_SIZE_MAX,
				 "LDB :SGXMEM:PA_%p%8.8lX:0x%8.8lX 0x%8.8lX "
				 "0x%8.8lX %s\r\n",
				 hUniqueTag1,
				 sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_SIZE - 1),
				 sDevPAddr.uiAddr & (SGX_MMU_PAGE_SIZE - 1),
				 ui32BlockBytes, ui32ParamOutPos, pszFile);
			PDumpWriteString2(pszScript, PDUMP_FLAGS_CONTINUOUS);
		} else {
			for (ui32Offset = 0; ui32Offset < ui32BlockBytes;
			     ui32Offset += sizeof(u32)) {
				u32 ui32PTE =
				    *((u32 *) (pui8LinAddr +
						      ui32Offset));

				if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0) {
					snprintf(pszScript,
						 SZ_SCRIPT_SIZE_MAX,
						 "WRW :SGXMEM:PA_%p%8.8lX:"
						 "0x%8.8lX :SGXMEM:"
						 "PA_%p%8.8lX:0x%8.8lX\r\n",
						 hUniqueTag1,
						 (sDevPAddr.uiAddr +
						  ui32Offset) &
						 ~(SGX_MMU_PAGE_SIZE - 1),
						 (sDevPAddr.uiAddr +
						  ui32Offset) &
						 (SGX_MMU_PAGE_SIZE - 1),
						 hUniqueTag2,
						 ui32PTE &
						 SGX_MMU_PDE_ADDR_MASK,
						 ui32PTE &
						 ~SGX_MMU_PDE_ADDR_MASK);
				} else {
					PVR_ASSERT(!
						   (ui32PTE &
						    SGX_MMU_PTE_VALID));
					snprintf(pszScript, SZ_SCRIPT_SIZE_MAX,
						 "WRW :SGXMEM:PA_%p%8.8lX:"
						 "0x%8.8lX 0x%8.8lX%p\r\n",
						 hUniqueTag1,
						 (sDevPAddr.uiAddr +
						  ui32Offset) &
						 ~(SGX_MMU_PAGE_SIZE - 1),
						 (sDevPAddr.uiAddr +
						  ui32Offset) &
						 (SGX_MMU_PAGE_SIZE - 1),
						 ui32PTE, hUniqueTag2);
				}
				PDumpWriteString2(pszScript,
						  PDUMP_FLAGS_CONTINUOUS);
			}
		}

		ui32PageOffset = 0;
		ui32Bytes -= ui32BlockBytes;
		pui8LinAddr += ui32BlockBytes;
		ui32ParamOutPos += ui32BlockBytes;
	}

	return PVRSRV_OK;
}
Example #15
static IMG_BOOL
ZeroBuf(BM_BUF *pBuf, BM_MAPPING *pMapping, IMG_SIZE_T ui32Bytes, IMG_UINT32 ui32Flags)
{
	IMG_VOID *pvCpuVAddr;

	if(pBuf->CpuVAddr)
	{
		OSMemSet(pBuf->CpuVAddr, 0, ui32Bytes);
	}
	else if(pMapping->eCpuMemoryOrigin == hm_contiguous
			|| pMapping->eCpuMemoryOrigin == hm_wrapped)
	{
		pvCpuVAddr = OSMapPhysToLin(pBuf->CpuPAddr,
									ui32Bytes,
									PVRSRV_HAP_KERNEL_ONLY
									| (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
									IMG_NULL);
		if(!pvCpuVAddr)
		{
			PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin for contiguous buffer failed"));
			return IMG_FALSE;
		}
		OSMemSet(pvCpuVAddr, 0, ui32Bytes);
		OSUnMapPhysToLin(pvCpuVAddr,
						 ui32Bytes,
						 PVRSRV_HAP_KERNEL_ONLY
						 | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
						 IMG_NULL);
	}
	else
	{
		IMG_SIZE_T ui32BytesRemaining = ui32Bytes;
		IMG_SIZE_T ui32CurrentOffset = 0;
		IMG_CPU_PHYADDR CpuPAddr;

		
		PVR_ASSERT(pBuf->hOSMemHandle);

		while(ui32BytesRemaining > 0)
		{
			IMG_SIZE_T ui32BlockBytes = MIN(ui32BytesRemaining, HOST_PAGESIZE());
			CpuPAddr = OSMemHandleToCpuPAddr(pBuf->hOSMemHandle, ui32CurrentOffset);
			
			if(CpuPAddr.uiAddr & (HOST_PAGESIZE() -1))
			{
				ui32BlockBytes =
					MIN(ui32BytesRemaining, (IMG_UINT32)(HOST_PAGEALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr));
			}

			pvCpuVAddr = OSMapPhysToLin(CpuPAddr,
										ui32BlockBytes,
										PVRSRV_HAP_KERNEL_ONLY
										| (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
										IMG_NULL);
			if(!pvCpuVAddr)
			{
				PVR_DPF((PVR_DBG_ERROR, "ZeroBuf: OSMapPhysToLin while zeroing non-contiguous memory FAILED"));
				return IMG_FALSE;
			}
			OSMemSet(pvCpuVAddr, 0, ui32BlockBytes);
			OSUnMapPhysToLin(pvCpuVAddr,
							 ui32BlockBytes,
							 PVRSRV_HAP_KERNEL_ONLY
							 | (ui32Flags & PVRSRV_HAP_CACHETYPE_MASK),
							 IMG_NULL);

			ui32BytesRemaining -= ui32BlockBytes;
			ui32CurrentOffset += ui32BlockBytes;
		}
	}

	return IMG_TRUE;
}
Example #16
static IMG_BOOL
WrapMemory (BM_HEAP *psBMHeap,
			IMG_SIZE_T uSize,
			IMG_SIZE_T ui32BaseOffset,
			IMG_BOOL bPhysContig,
			IMG_SYS_PHYADDR *psAddr,
			IMG_VOID *pvCPUVAddr,
			IMG_UINT32 uFlags,
			BM_BUF *pBuf)
{
	IMG_DEV_VIRTADDR DevVAddr = {0};
	BM_MAPPING *pMapping;
	IMG_BOOL bResult;
	IMG_SIZE_T const ui32PageSize = HOST_PAGESIZE();

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, bPhysContig=0x%x, pvCPUVAddr = 0x%08x, flags=0x%x)",
			  (IMG_UINTPTR_T)psBMHeap, uSize, ui32BaseOffset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));

	PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);

	PVR_ASSERT(((IMG_UINTPTR_T)pvCPUVAddr & (ui32PageSize - 1)) == 0);

	uSize += ui32BaseOffset;
	uSize = HOST_PAGEALIGN (uSize);

	
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof(*pMapping),
						(IMG_PVOID *)&pMapping, IMG_NULL,
						"Mocked-up mapping") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",sizeof(*pMapping)));
		return IMG_FALSE;
	}

	OSMemSet(pMapping, 0, sizeof (*pMapping));

	pMapping->uSize = uSize;
	pMapping->pBMHeap = psBMHeap;

	if(pvCPUVAddr)
	{
		pMapping->CpuVAddr = pvCPUVAddr;

		if (bPhysContig)
		{
			pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
			pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);

			if(OSRegisterMem(pMapping->CpuPAddr,
							pMapping->CpuVAddr,
							pMapping->uSize,
							uFlags,
							&pMapping->hOSMemHandle) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,	"WrapMemory: OSRegisterMem Phys=0x%08X, Size=%d) failed",
					pMapping->CpuPAddr.uiAddr, pMapping->uSize));
				goto fail_cleanup;
			}
		}
		else
		{
			pMapping->eCpuMemoryOrigin = hm_wrapped_scatter_virtaddr;
			pMapping->psSysAddr = psAddr;

			if(OSRegisterDiscontigMem(pMapping->psSysAddr,
							pMapping->CpuVAddr,
							pMapping->uSize,
							uFlags,
							&pMapping->hOSMemHandle) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,	"WrapMemory: OSRegisterDiscontigMem Size=%d) failed",
					pMapping->uSize));
				goto fail_cleanup;
			}
		}
	}
	else
	{
		if (bPhysContig)
		{
			pMapping->eCpuMemoryOrigin = hm_wrapped;
			pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);

			if(OSReservePhys(pMapping->CpuPAddr,
							 pMapping->uSize,
							 uFlags,
							 &pMapping->CpuVAddr,
							 &pMapping->hOSMemHandle) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,	"WrapMemory: OSReservePhys(Phys=0x%08X, Size=%d) failed",
					pMapping->CpuPAddr.uiAddr, pMapping->uSize));
				goto fail_cleanup;
			}
		}
		else
		{
			pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
			pMapping->psSysAddr = psAddr;

			if(OSReserveDiscontigPhys(pMapping->psSysAddr,
							 pMapping->uSize,
							 uFlags,
							 &pMapping->CpuVAddr,
							 &pMapping->hOSMemHandle) != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,	"WrapMemory: OSReserveDiscontigPhys(Size=%d) failed",
					pMapping->uSize));
				goto fail_cleanup;
			}
		}
	}

	
	bResult = DevMemoryAlloc(psBMHeap->pBMContext,
							 pMapping,
							 IMG_NULL,
							 uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
							 IMG_CAST_TO_DEVVADDR_UINT(ui32PageSize),
							 &DevVAddr);
	if (!bResult)
	{
		PVR_DPF((PVR_DBG_ERROR,
				"WrapMemory: DevMemoryAlloc(0x%x) failed",
				pMapping->uSize));
		goto fail_cleanup;
	}

	
	pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
	if(!ui32BaseOffset)
	{
		pBuf->hOSMemHandle = pMapping->hOSMemHandle;
	}
	else
	{
		if(OSGetSubMemHandle(pMapping->hOSMemHandle,
							 ui32BaseOffset,
							 (pMapping->uSize-ui32BaseOffset),
							 uFlags,
							 &pBuf->hOSMemHandle)!=PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "WrapMemory: OSGetSubMemHandle failed"));
			goto fail_cleanup;
		}
	}
	if(pMapping->CpuVAddr)
	{
		pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + ui32BaseOffset);
	}
	pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(ui32BaseOffset);

	if(uFlags & PVRSRV_MEM_ZERO)
	{
		if(!ZeroBuf(pBuf, pMapping, uSize, uFlags))
		{
			return IMG_FALSE;
		}
	}

	PVR_DPF ((PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr));
	PVR_DPF ((PVR_DBG_MESSAGE,
				"WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
				pMapping->DevVAddr.uiAddr, pMapping->CpuPAddr.uiAddr, pMapping->uSize));
	PVR_DPF ((PVR_DBG_MESSAGE,
				"WrapMemory: DevV=%08X CpuP=%08X uSize=0x%x",
				pBuf->DevVAddr.uiAddr, pBuf->CpuPAddr.uiAddr, uSize));

	pBuf->pMapping = pMapping;
	return IMG_TRUE;

fail_cleanup:
	if(ui32BaseOffset && pBuf->hOSMemHandle)
	{
		OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);
	}

	if(pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
	{
		switch(pMapping->eCpuMemoryOrigin)
		{
			case hm_wrapped:
				OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
				break;
			case hm_wrapped_virtaddr:
				OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
				break;
			case hm_wrapped_scatter:
				OSUnReserveDiscontigPhys(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
				break;
			case hm_wrapped_scatter_virtaddr:
				OSUnRegisterDiscontigMem(pMapping->CpuVAddr, pMapping->uSize, uFlags, pMapping->hOSMemHandle);
				break;
			default:
				break;
		}

	}

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
	

	return IMG_FALSE;
}
Beispiel #17
IMG_BOOL
BM_Wrap (	IMG_HANDLE hDevMemHeap,
			IMG_SIZE_T ui32Size,
			IMG_SIZE_T ui32Offset,
			IMG_BOOL bPhysContig,
			IMG_SYS_PHYADDR *psSysAddr,
			IMG_VOID *pvCPUVAddr,
			IMG_UINT32 *pui32Flags,
			BM_HANDLE *phBuf)
{
	BM_BUF *pBuf;
	BM_CONTEXT *psBMContext;
	BM_HEAP *psBMHeap;
	SYS_DATA *psSysData;
	IMG_SYS_PHYADDR sHashAddress;
	IMG_UINT32 uFlags;

	psBMHeap = (BM_HEAP*)hDevMemHeap;
	psBMContext = psBMHeap->pBMContext;

	uFlags = psBMHeap->ui32Attribs & (PVRSRV_HAP_CACHETYPE_MASK | PVRSRV_HAP_MAPTYPE_MASK);

	if ((pui32Flags != IMG_NULL) && ((*pui32Flags & PVRSRV_HAP_CACHETYPE_MASK) != 0))
	{
		uFlags &= ~PVRSRV_HAP_CACHETYPE_MASK;
		uFlags |= *pui32Flags & PVRSRV_HAP_CACHETYPE_MASK;
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
		  "BM_Wrap (uSize=0x%x, uOffset=0x%x, bPhysContig=0x%x, pvCPUVAddr=0x%x, uFlags=0x%x)",
			ui32Size, ui32Offset, bPhysContig, (IMG_UINTPTR_T)pvCPUVAddr, uFlags));

	SysAcquireData(&psSysData);

#if defined(PVR_LMA)
	if (bPhysContig)
	{
		if (!ValidSysPAddrRangeForDev(psBMContext->psDeviceNode, *psSysAddr, WRAP_MAPPING_SIZE(ui32Size, ui32Offset)))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: System address range invalid for device"));
			return IMG_FALSE;
		}
	}
	else
	{
		IMG_SIZE_T ui32HostPageSize = HOST_PAGESIZE();

		if (!ValidSysPAddrArrayForDev(psBMContext->psDeviceNode, psSysAddr, WRAP_PAGE_COUNT(ui32Size, ui32Offset, ui32HostPageSize), ui32HostPageSize))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: Array of system addresses invalid for device"));
			return IMG_FALSE;
		}
	}
#endif
	
	sHashAddress = psSysAddr[0];

	
	sHashAddress.uiAddr += ui32Offset;

	
	pBuf = (BM_BUF *)HASH_Retrieve(psBMContext->pBufferHash, sHashAddress.uiAddr);

	if(pBuf)
	{
		IMG_SIZE_T ui32MappingSize = HOST_PAGEALIGN (ui32Size + ui32Offset);

		
		if(pBuf->pMapping->uSize == ui32MappingSize && (pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped ||
														pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr))
		{
			PVR_DPF((PVR_DBG_MESSAGE,
					"BM_Wrap (Matched previous Wrap! uSize=0x%x, uOffset=0x%x, SysAddr=%08X)",
					ui32Size, ui32Offset, sHashAddress.uiAddr));

			pBuf->ui32RefCount++;
			*phBuf = (BM_HANDLE)pBuf;
			if(pui32Flags)
				*pui32Flags = uFlags;

			return IMG_TRUE;
		}
	}

	
	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof (BM_BUF),
						(IMG_PVOID *)&pBuf, IMG_NULL,
						"Buffer Manager buffer") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: BM_Buf alloc FAILED"));
		return IMG_FALSE;
	}
	OSMemSet(pBuf, 0, sizeof (BM_BUF));

	
	if (WrapMemory (psBMHeap, ui32Size, ui32Offset, bPhysContig, psSysAddr, pvCPUVAddr, uFlags, pBuf) != IMG_TRUE)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: WrapMemory FAILED"));
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof (BM_BUF), pBuf, IMG_NULL);
		
		return IMG_FALSE;
	}

	
	if(pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped || pBuf->pMapping->eCpuMemoryOrigin == hm_wrapped_virtaddr)
	{
		
		PVR_ASSERT(SysSysPAddrToCpuPAddr(sHashAddress).uiAddr == pBuf->CpuPAddr.uiAddr);

		if (!HASH_Insert (psBMContext->pBufferHash, sHashAddress.uiAddr, (IMG_UINTPTR_T)pBuf))
		{
			FreeBuf (pBuf, uFlags, IMG_TRUE);
			PVR_DPF((PVR_DBG_ERROR, "BM_Wrap: HASH_Insert FAILED"));
			return IMG_FALSE;
		}
	}

	PVR_DPF ((PVR_DBG_MESSAGE,
			"BM_Wrap (uSize=0x%x, uFlags=0x%x, devVAddr=%08X)",
			ui32Size, uFlags, pBuf->DevVAddr.uiAddr));

	
	pBuf->ui32RefCount = 1;
	*phBuf = (BM_HANDLE)pBuf;
	if(pui32Flags)
	{
		
		*pui32Flags = (uFlags & ~PVRSRV_HAP_MAPTYPE_MASK) | PVRSRV_HAP_MULTI_PROCESS;
	}

	return IMG_TRUE;
}
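/*
 * Hedged usage sketch (not driver code): how a caller might drive BM_Wrap
 * for a single physically contiguous page, mirroring the call pattern of
 * PVRSRVWrapExtMemoryKM/PVRSRVMapDeviceClassMemoryKM later in this listing.
 * The heap handle and the (page-aligned) system physical address are assumed
 * to come from the caller; the function name and flag choice are illustrative.
 */
static IMG_BOOL ExampleWrapContigPage(IMG_HANDLE hDevMemHeap,
									  IMG_SYS_PHYADDR sSysPAddr,
									  BM_HANDLE *phBuf)
{
	IMG_UINT32 ui32Flags = PVRSRV_HAP_CACHED;	/* requested cache type only */

	/* sSysPAddr must be host-page aligned (WrapMemory asserts this).
	   No CPU virtual address is supplied, so WrapMemory reserves one. */
	return BM_Wrap(hDevMemHeap,
				   HOST_PAGESIZE(),		/* ui32Size */
				   0,					/* ui32Offset */
				   IMG_TRUE,			/* bPhysContig */
				   &sSysPAddr,
				   IMG_NULL,			/* pvCPUVAddr */
				   &ui32Flags,			/* in: cache type, out: effective flags */
				   phBuf);
}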
Beispiel #18
PVRSRV_ERROR PDumpMemKM(void *pvAltLinAddr,
			PVRSRV_KERNEL_MEM_INFO * psMemInfo,
			u32 ui32Offset,
			u32 ui32Bytes, u32 ui32Flags, void *hUniqueTag)
{
	PVRSRV_ERROR eErr;
	u32 ui32NumPages;
	u32 ui32PageByteOffset;
	u32 ui32BlockBytes;
	u8 *pui8LinAddr;
	u8 *pui8DataLinAddr = NULL;
	IMG_DEV_VIRTADDR sDevVPageAddr;
	IMG_DEV_VIRTADDR sDevVAddr;
	IMG_DEV_PHYADDR sDevPAddr;
	u32 ui32ParamOutPos;

	PDUMP_GET_SCRIPT_AND_FILE_STRING();

	PVR_ASSERT((ui32Offset + ui32Bytes) <= psMemInfo->ui32AllocSize);

	if (!PDumpOSJTInitialised()) {
		return PVRSRV_ERROR_GENERIC;
	}

	if (ui32Bytes == 0 || PDumpOSIsSuspended()) {
		return PVRSRV_OK;
	}

	if (pvAltLinAddr) {
		pui8DataLinAddr = pvAltLinAddr;
	} else if (psMemInfo->pvLinAddrKM) {
		pui8DataLinAddr = (u8 *) psMemInfo->pvLinAddrKM + ui32Offset;
	}
	pui8LinAddr = (u8 *) psMemInfo->pvLinAddrKM;
	sDevVAddr = psMemInfo->sDevVAddr;

	sDevVAddr.uiAddr += ui32Offset;
	pui8LinAddr += ui32Offset;

	PVR_ASSERT(pui8DataLinAddr);

	PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
				 ui32Bytes, ui32Flags);

	ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);

	if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
				pui8DataLinAddr, ui32Bytes, ui32Flags)) {
		return PVRSRV_ERROR_GENERIC;
	}

	if (PDumpOSGetParamFileNum() == 0) {
		eErr =
		    PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
				   "%%0%%.prm");
	} else {
		eErr =
		    PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
				   "%%0%%%lu.prm", PDumpOSGetParamFileNum());
	}
	if (eErr != PVRSRV_OK) {
		return eErr;
	}

	eErr = PDumpOSBufprintf(hScript,
				ui32MaxLenScript,
				"-- LDB :SGXMEM:VA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
				(u32) hUniqueTag,
				psMemInfo->sDevVAddr.uiAddr,
				ui32Offset,
				ui32Bytes, ui32ParamOutPos, pszFileName);
	if (eErr != PVRSRV_OK) {
		return eErr;
	}
	PDumpOSWriteString2(hScript, ui32Flags);

	PDumpOSCPUVAddrToPhysPages(psMemInfo->sMemBlk.hOSMemHandle,
				   ui32Offset,
				   pui8LinAddr, &ui32PageByteOffset);
	ui32NumPages =
	    (ui32PageByteOffset + ui32Bytes + HOST_PAGESIZE() -
	     1) / HOST_PAGESIZE();

	while (ui32NumPages) {
#if 0
		u32 ui32BlockBytes = MIN(ui32BytesRemaining, PAGE_SIZE);
		CpuPAddr =
		    OSMemHandleToCpuPAddr(psMemInfo->sMemBlk.hOSMemHandle,
					  ui32CurrentOffset);
#endif
		ui32NumPages--;

		sDevVPageAddr.uiAddr = sDevVAddr.uiAddr - ui32PageByteOffset;

		PVR_ASSERT((sDevVPageAddr.uiAddr & 0xFFF) == 0);

		BM_GetPhysPageAddr(psMemInfo, sDevVPageAddr, &sDevPAddr);

		sDevPAddr.uiAddr += ui32PageByteOffset;
#if 0
		if (ui32PageByteOffset) {
			ui32BlockBytes =
			    MIN(ui32BytesRemaining,
				PAGE_ALIGN(CpuPAddr.uiAddr) - CpuPAddr.uiAddr);

			ui32PageByteOffset = 0;
		}
#endif

		if (ui32PageByteOffset + ui32Bytes > HOST_PAGESIZE()) {

			ui32BlockBytes = HOST_PAGESIZE() - ui32PageByteOffset;
		} else {

			ui32BlockBytes = ui32Bytes;
		}

		eErr = PDumpOSBufprintf(hScript,
					ui32MaxLenScript,
					"LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
					(u32) hUniqueTag,
					sDevPAddr.uiAddr & ~(SGX_MMU_PAGE_MASK),
					sDevPAddr.uiAddr & (SGX_MMU_PAGE_MASK),
					ui32BlockBytes,
					ui32ParamOutPos, pszFileName);
		if (eErr != PVRSRV_OK) {
			return eErr;
		}
		PDumpOSWriteString2(hScript, ui32Flags);

		ui32PageByteOffset = 0;

		ui32Bytes -= ui32BlockBytes;

		sDevVAddr.uiAddr += ui32BlockBytes;

		pui8LinAddr += ui32BlockBytes;

		ui32ParamOutPos += ui32BlockBytes;
	}

	return PVRSRV_OK;
}
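/*
 * Minimal sketch of the page-walk arithmetic PDumpMemKM uses above: given the
 * byte offset into the first host page and the transfer length, work out how
 * many pages the LDB commands must be split across and how large the first
 * block is. HOST_PAGESIZE() comes from the surrounding headers; the function
 * name is illustrative only.
 */
static void ExamplePageWalkSizes(u32 ui32PageByteOffset, u32 ui32Bytes,
				 u32 *pui32NumPages, u32 *pui32FirstBlockBytes)
{
	*pui32NumPages = (ui32PageByteOffset + ui32Bytes + HOST_PAGESIZE() - 1) /
			 HOST_PAGESIZE();

	/* the first block stops at the end of its page;
	   every later block starts page aligned */
	if (ui32PageByteOffset + ui32Bytes > HOST_PAGESIZE())
		*pui32FirstBlockBytes = HOST_PAGESIZE() - ui32PageByteOffset;
	else
		*pui32FirstBlockBytes = ui32Bytes;
}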
static IMG_BOOL WrapMemory(struct BM_HEAP *psBMHeap,
	   size_t uSize, u32 ui32BaseOffset, IMG_BOOL bPhysContig,
	   struct IMG_SYS_PHYADDR *psAddr, void *pvCPUVAddr, u32 uFlags,
	   struct BM_BUF *pBuf)
{
	struct IMG_DEV_VIRTADDR DevVAddr = { 0 };
	struct BM_MAPPING *pMapping;
	IMG_BOOL bResult;
	u32 const ui32PageSize = HOST_PAGESIZE();

	PVR_DPF(PVR_DBG_MESSAGE,
		 "WrapMemory(psBMHeap=%08X, size=0x%x, offset=0x%x, "
		 "bPhysContig=0x%x, pvCPUVAddr = 0x%x, flags=0x%x, pBuf=%08X)",
		 psBMHeap, uSize, ui32BaseOffset, bPhysContig, pvCPUVAddr,
		 uFlags, pBuf);

	PVR_ASSERT((psAddr->uiAddr & (ui32PageSize - 1)) == 0);

	PVR_ASSERT(((u32) pvCPUVAddr & (ui32PageSize - 1)) == 0);

	uSize += ui32BaseOffset;
	uSize = HOST_PAGEALIGN(uSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(*pMapping),
		       (void **)&pMapping, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR, "WrapMemory: OSAllocMem(0x%x) FAILED",
			 sizeof(*pMapping));
		return IMG_FALSE;
	}

	OSMemSet(pMapping, 0, sizeof(*pMapping));

	pMapping->uSize = uSize;
	pMapping->pBMHeap = psBMHeap;

	if (pvCPUVAddr) {
		pMapping->CpuVAddr = pvCPUVAddr;

		if (bPhysContig) {
			pMapping->eCpuMemoryOrigin = hm_wrapped_virtaddr;
			pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);

			if (OSRegisterMem(pMapping->CpuPAddr,
					pMapping->CpuVAddr, pMapping->uSize,
					  uFlags, &pMapping->hOSMemHandle) !=
					PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
					"OSRegisterMem Phys=0x%08X, "
					"CpuVAddr = 0x%08X, Size=%d) failed",
					 pMapping->CpuPAddr, pMapping->CpuVAddr,
					 pMapping->uSize);
				goto fail_cleanup;
			}
		} else {
			pMapping->eCpuMemoryOrigin =
			    hm_wrapped_scatter_virtaddr;
			pMapping->psSysAddr = psAddr;

			if (OSRegisterDiscontigMem(pMapping->psSysAddr,
						   pMapping->CpuVAddr,
						   pMapping->uSize,
						   uFlags,
						   &pMapping->hOSMemHandle) !=
			    PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
					"OSRegisterDiscontigMem CpuVAddr = "
					"0x%08X, Size=%d) failed",
					 pMapping->CpuVAddr, pMapping->uSize);
				goto fail_cleanup;
			}
		}
	} else {
		if (bPhysContig) {
			pMapping->eCpuMemoryOrigin = hm_wrapped;
			pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(psAddr[0]);

			if (OSReservePhys(pMapping->CpuPAddr, pMapping->uSize,
					uFlags, &pMapping->CpuVAddr,
					&pMapping->hOSMemHandle) != PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
					"OSReservePhys Phys=0x%08X, Size=%d) "
					"failed",
					 pMapping->CpuPAddr, pMapping->uSize);
				goto fail_cleanup;
			}
		} else {
			pMapping->eCpuMemoryOrigin = hm_wrapped_scatter;
			pMapping->psSysAddr = psAddr;

			if (OSReserveDiscontigPhys(pMapping->psSysAddr,
						   pMapping->uSize, uFlags,
						   &pMapping->CpuVAddr,
						   &pMapping->hOSMemHandle) !=
			    PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR, "WrapMemory: "
				       "OSReserveDiscontigPhys Size=%d) failed",
					 pMapping->uSize);
				goto fail_cleanup;
			}
		}
	}

	bResult = DevMemoryAlloc(psBMHeap->pBMContext, pMapping,
				 uFlags | PVRSRV_MEM_READ | PVRSRV_MEM_WRITE,
				 ui32PageSize, &DevVAddr);
	if (!bResult) {
		PVR_DPF(PVR_DBG_ERROR,
			 "WrapMemory: DevMemoryAlloc(0x%x) failed",
			 pMapping->uSize);
		goto fail_cleanup;
	}

	pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + ui32BaseOffset;
	if (!ui32BaseOffset)
		pBuf->hOSMemHandle = pMapping->hOSMemHandle;
	else
		if (OSGetSubMemHandle(pMapping->hOSMemHandle,
				      ui32BaseOffset,
				      (pMapping->uSize - ui32BaseOffset),
				      uFlags,
				      &pBuf->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				 "WrapMemory: OSGetSubMemHandle failed");
			goto fail_cleanup;
		}
	if (pMapping->CpuVAddr)
		pBuf->CpuVAddr = (void *)((u32) pMapping->CpuVAddr +
							     ui32BaseOffset);
	pBuf->DevVAddr.uiAddr = pMapping->DevVAddr.uiAddr + ui32BaseOffset;

	if (uFlags & PVRSRV_MEM_ZERO)
		if (!ZeroBuf(pBuf, pMapping, uSize, uFlags))
			return IMG_FALSE;

	PVR_DPF(PVR_DBG_MESSAGE, "DevVaddr.uiAddr=%08X", DevVAddr.uiAddr);
	PVR_DPF(PVR_DBG_MESSAGE, "WrapMemory: pMapping=%08X: DevV=%08X "
			"CpuV=%08X CpuP=%08X uSize=0x%x",
		 pMapping, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
		 pMapping->CpuPAddr.uiAddr, pMapping->uSize);
	PVR_DPF(PVR_DBG_MESSAGE, "WrapMemory: pBuf=%08X: DevV=%08X "
				"CpuV=%08X CpuP=%08X uSize=0x%x",
		 pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
		 pBuf->CpuPAddr.uiAddr, uSize);

	pBuf->pMapping = pMapping;
	return IMG_TRUE;

fail_cleanup:
	if (ui32BaseOffset && pBuf->hOSMemHandle)
		OSReleaseSubMemHandle(pBuf->hOSMemHandle, uFlags);

	if (pMapping->CpuVAddr || pMapping->hOSMemHandle)
		switch (pMapping->eCpuMemoryOrigin) {
		case hm_wrapped:
			OSUnReservePhys(pMapping->CpuVAddr, pMapping->uSize,
					uFlags, pMapping->hOSMemHandle);
			break;
		case hm_wrapped_virtaddr:
			OSUnRegisterMem(pMapping->CpuVAddr, pMapping->uSize,
					uFlags, pMapping->hOSMemHandle);
			break;
		case hm_wrapped_scatter:
			OSUnReserveDiscontigPhys(pMapping->CpuVAddr,
						 pMapping->uSize, uFlags,
						 pMapping->hOSMemHandle);
			break;
		case hm_wrapped_scatter_virtaddr:
			OSUnRegisterDiscontigMem(pMapping->CpuVAddr,
						 pMapping->uSize, uFlags,
						 pMapping->hOSMemHandle);
			break;
		default:
			break;
		}

	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
		  NULL);

	return IMG_FALSE;
}
Beispiel #20
PVRSRV_ERROR PDumpMem2KM(PVRSRV_DEVICE_TYPE eDeviceType,
			 IMG_CPU_VIRTADDR pvLinAddr,
			 u32 ui32Bytes,
			 u32 ui32Flags,
			 int bInitialisePages,
			 void *hUniqueTag1, void *hUniqueTag2)
{
	PVRSRV_ERROR eErr;
	u32 ui32NumPages;
	u32 ui32PageOffset;
	u32 ui32BlockBytes;
	u8 *pui8LinAddr;
	IMG_DEV_PHYADDR sDevPAddr;
	IMG_CPU_PHYADDR sCpuPAddr;
	u32 ui32Offset;
	u32 ui32ParamOutPos;

	PDUMP_GET_SCRIPT_AND_FILE_STRING();

	if (!pvLinAddr || !PDumpOSJTInitialised()) {
		return PVRSRV_ERROR_GENERIC;
	}

	if (PDumpOSIsSuspended()) {
		return PVRSRV_OK;
	}

	PDumpOSCheckForSplitting(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
				 ui32Bytes, ui32Flags);

	ui32ParamOutPos = PDumpOSGetStreamOffset(PDUMP_STREAM_PARAM2);

	if (bInitialisePages) {

		if (!PDumpOSWriteString(PDumpOSGetStream(PDUMP_STREAM_PARAM2),
					pvLinAddr,
					ui32Bytes, PDUMP_FLAGS_CONTINUOUS)) {
			return PVRSRV_ERROR_GENERIC;
		}

		if (PDumpOSGetParamFileNum() == 0) {
			eErr =
			    PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
					   "%%0%%.prm");
		} else {
			eErr =
			    PDumpOSSprintf(pszFileName, ui32MaxLenFileName,
					   "%%0%%%lu.prm",
					   PDumpOSGetParamFileNum());
		}
		if (eErr != PVRSRV_OK) {
			return eErr;
		}
	}

	ui32PageOffset = (u32) pvLinAddr & (HOST_PAGESIZE() - 1);
	ui32NumPages =
	    (ui32PageOffset + ui32Bytes + HOST_PAGESIZE() -
	     1) / HOST_PAGESIZE();
	pui8LinAddr = (u8 *) pvLinAddr;

	while (ui32NumPages) {
		ui32NumPages--;
		sCpuPAddr = OSMapLinToCPUPhys(pui8LinAddr);
		sDevPAddr = SysCpuPAddrToDevPAddr(eDeviceType, sCpuPAddr);

		if (ui32PageOffset + ui32Bytes > HOST_PAGESIZE()) {

			ui32BlockBytes = HOST_PAGESIZE() - ui32PageOffset;
		} else {

			ui32BlockBytes = ui32Bytes;
		}

		if (bInitialisePages) {
			eErr = PDumpOSBufprintf(hScript,
						ui32MaxLenScript,
						"LDB :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX 0x%8.8lX %s\r\n",
						(u32) hUniqueTag1,
						sDevPAddr.
						uiAddr & ~(SGX_MMU_PAGE_MASK),
						sDevPAddr.
						uiAddr & (SGX_MMU_PAGE_MASK),
						ui32BlockBytes, ui32ParamOutPos,
						pszFileName);
			if (eErr != PVRSRV_OK) {
				return eErr;
			}
			PDumpOSWriteString2(hScript, PDUMP_FLAGS_CONTINUOUS);
		} else {
			for (ui32Offset = 0; ui32Offset < ui32BlockBytes;
			     ui32Offset += sizeof(u32)) {
				u32 ui32PTE =
				    *((u32 *) (pui8LinAddr + ui32Offset));

				if ((ui32PTE & SGX_MMU_PDE_ADDR_MASK) != 0) {
#if defined(SGX_FEATURE_36BIT_MMU)
					eErr = PDumpOSBufprintf(hScript,
								ui32MaxLenScript,
								"WRW :SGXMEM:$1 :SGXMEM:PA_%8.8lX%8.8lX:0x0\r\n",
								(u32)
								hUniqueTag2,
								(ui32PTE &
								 SGX_MMU_PDE_ADDR_MASK)
								<<
								SGX_MMU_PTE_ADDR_ALIGNSHIFT);
					if (eErr != PVRSRV_OK) {
						return eErr;
					}
					PDumpOSWriteString2(hScript,
							    PDUMP_FLAGS_CONTINUOUS);
					eErr =
					    PDumpOSBufprintf(hScript,
							     ui32MaxLenScript,
							     "SHR :SGXMEM:$1 :SGXMEM:$1 0x4\r\n");
					if (eErr != PVRSRV_OK) {
						return eErr;
					}
					PDumpOSWriteString2(hScript,
							    PDUMP_FLAGS_CONTINUOUS);
					eErr =
					    PDumpOSBufprintf(hScript,
							     ui32MaxLenScript,
							     "OR :SGXMEM:$1 :SGXMEM:$1 0x%8.8lX\r\n",
							     ui32PTE &
							     ~SGX_MMU_PDE_ADDR_MASK);
					if (eErr != PVRSRV_OK) {
						return eErr;
					}
					PDumpOSWriteString2(hScript,
							    PDUMP_FLAGS_CONTINUOUS);
					eErr =
					    PDumpOSBufprintf(hScript,
							     ui32MaxLenScript,
							     "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:$1\r\n",
							     (u32) hUniqueTag1,
							     (sDevPAddr.uiAddr +
							      ui32Offset) &
							     ~
							     (SGX_MMU_PAGE_MASK),
							     (sDevPAddr.uiAddr +
							      ui32Offset) &
							     (SGX_MMU_PAGE_MASK));
					if (eErr != PVRSRV_OK) {
						return eErr;
					}
					PDumpOSWriteString2(hScript,
							    PDUMP_FLAGS_CONTINUOUS);
#else
					eErr = PDumpOSBufprintf(hScript,
								ui32MaxLenScript,
								"WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX\r\n",
								(u32)
								hUniqueTag1,
								(sDevPAddr.
								 uiAddr +
								 ui32Offset) &
								~
								(SGX_MMU_PAGE_MASK),
								(sDevPAddr.
								 uiAddr +
								 ui32Offset) &
								(SGX_MMU_PAGE_MASK),
								(u32)
								hUniqueTag2,
								(ui32PTE &
								 SGX_MMU_PDE_ADDR_MASK)
								<<
								SGX_MMU_PTE_ADDR_ALIGNSHIFT,
								ui32PTE &
								~SGX_MMU_PDE_ADDR_MASK);
					if (eErr != PVRSRV_OK) {
						return eErr;
					}
#endif
				} else {
					PVR_ASSERT((ui32PTE & SGX_MMU_PTE_VALID)
						   == 0UL);
					eErr =
					    PDumpOSBufprintf(hScript,
							     ui32MaxLenScript,
							     "WRW :SGXMEM:PA_%8.8lX%8.8lX:0x%8.8lX 0x%8.8lX%8.8lX\r\n",
							     (u32) hUniqueTag1,
							     (sDevPAddr.uiAddr +
							      ui32Offset) &
							     ~
							     (SGX_MMU_PAGE_MASK),
							     (sDevPAddr.uiAddr +
							      ui32Offset) &
							     (SGX_MMU_PAGE_MASK),
							     (ui32PTE <<
							      SGX_MMU_PTE_ADDR_ALIGNSHIFT),
							     (u32) hUniqueTag2);
					if (eErr != PVRSRV_OK) {
						return eErr;
					}
				}
				PDumpOSWriteString2(hScript,
						    PDUMP_FLAGS_CONTINUOUS);
			}
		}

		ui32PageOffset = 0;

		ui32Bytes -= ui32BlockBytes;

		pui8LinAddr += ui32BlockBytes;

		ui32ParamOutPos += ui32BlockBytes;
	}

	return PVRSRV_OK;
}
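/*
 * Hedged sketch of the entry decoding PDumpMem2KM performs above (non-36-bit
 * path): the address field of a directory/table entry is masked out and
 * shifted up by SGX_MMU_PTE_ADDR_ALIGNSHIFT to recover the physical address
 * it points at, while the remaining low bits are the flag field. The SGX_MMU_*
 * constants come from the surrounding headers; the helper is illustrative.
 */
static void ExampleDecodeMMUEntry(u32 ui32PTE, u32 *pui32PhysBase, u32 *pui32Flags)
{
	*pui32PhysBase = (ui32PTE & SGX_MMU_PDE_ADDR_MASK) <<
			 SGX_MMU_PTE_ADDR_ALIGNSHIFT;
	*pui32Flags = ui32PTE & ~SGX_MMU_PDE_ADDR_MASK;
}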
Beispiel #21
static IMG_VOID SysSaveRestoreArenaLiveSegments(IMG_BOOL bSave)
{
	IMG_SIZE_T        uiBufferSize;
	static IMG_PVOID  pvBackupBuffer = IMG_NULL;
	static IMG_HANDLE hBlockAlloc;
	static IMG_UINT32 uiWriteBufferSize = 0;
	PVRSRV_ERROR eError;

	uiBufferSize = 0;


	if (gpsSysData->apsLocalDevMemArena[0] != IMG_NULL)
	{

		if (PVRSRVSaveRestoreLiveSegments((IMG_HANDLE)gpsSysData->apsLocalDevMemArena[0], IMG_NULL, &uiBufferSize, bSave) == PVRSRV_OK)
		{
			if (uiBufferSize)
			{
				if (bSave && pvBackupBuffer == IMG_NULL)
				{
					uiWriteBufferSize = uiBufferSize;

					/* Allocate OS memory in which to store the device's local memory. */
					eError = OSAllocPages(PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_CACHED,
										  uiBufferSize,
										  HOST_PAGESIZE(),
										  IMG_NULL,
										  0,
										  IMG_NULL,
										  &pvBackupBuffer,
										  &hBlockAlloc);
					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,
								"SysSaveRestoreArenaLiveSegments: OSAllocPages(0x%" SIZE_T_FMT_LEN "x) failed:%u", uiBufferSize, eError));
						return;
					}
				}
				else
				{
					PVR_ASSERT (uiWriteBufferSize == uiBufferSize);
				}


				PVRSRVSaveRestoreLiveSegments((IMG_HANDLE)gpsSysData->apsLocalDevMemArena[0], pvBackupBuffer, &uiBufferSize, bSave);

				if (!bSave && pvBackupBuffer)
				{
					/* Free the OS memory. */
					eError = OSFreePages(PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_CACHED,
										 uiWriteBufferSize,
										 pvBackupBuffer,
										 hBlockAlloc);
					if (eError != PVRSRV_OK)
					{
						PVR_DPF((PVR_DBG_ERROR,	"SysSaveRestoreArenaLiveSegments: OSFreePages(0x%" SIZE_T_FMT_LEN "x) failed:%u",
								uiBufferSize, eError));
					}

					pvBackupBuffer = IMG_NULL;
				}
			}
		}
	}

}
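/*
 * Minimal sketch (illustrative names, simplified error handling) of the
 * two-pass save pattern used above: PVRSRVSaveRestoreLiveSegments is first
 * called with a NULL buffer purely to report the size needed, backing pages
 * of that size are then allocated, and the call is repeated with the real
 * buffer to copy the live segments out.
 */
static PVRSRV_ERROR ExampleSaveArenaSegments(IMG_HANDLE hArena,
											 IMG_PVOID *ppvBuffer,
											 IMG_HANDLE *phBlockAlloc,
											 IMG_SIZE_T *puiSize)
{
	PVRSRV_ERROR eError;

	/* first pass: NULL buffer just returns the required size */
	eError = PVRSRVSaveRestoreLiveSegments(hArena, IMG_NULL, puiSize, IMG_TRUE);
	if (eError != PVRSRV_OK || *puiSize == 0)
	{
		return eError;
	}

	/* allocate page-aligned OS memory to hold the segment contents */
	eError = OSAllocPages(PVRSRV_HAP_KERNEL_ONLY | PVRSRV_HAP_CACHED,
						  *puiSize,
						  HOST_PAGESIZE(),
						  IMG_NULL,
						  0,
						  IMG_NULL,
						  ppvBuffer,
						  phBlockAlloc);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* second pass: actually copy the live segments into the buffer */
	return PVRSRVSaveRestoreLiveSegments(hArena, *ppvBuffer, puiSize, IMG_TRUE);
}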
Beispiel #22
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVMapDeviceClassMemoryKM(PVRSRV_PER_PROCESS_DATA	*psPerProc,
													   IMG_HANDLE				hDevMemContext,
													   IMG_HANDLE				hDeviceClassBuffer,
													   PVRSRV_KERNEL_MEM_INFO	**ppsMemInfo,
													   IMG_HANDLE				*phOSMapInfo)
{
	PVRSRV_ERROR eError;
	PVRSRV_DEVICE_NODE* psDeviceNode;
	PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
	PVRSRV_DEVICECLASS_BUFFER *psDeviceClassBuffer;
	IMG_SYS_PHYADDR *psSysPAddr;
	IMG_VOID *pvCPUVAddr, *pvPageAlignedCPUVAddr;
	IMG_BOOL bPhysContig;
	BM_CONTEXT *psBMContext;
	DEVICE_MEMORY_INFO *psDevMemoryInfo;
	DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
	IMG_HANDLE hDevMemHeap = IMG_NULL;
	IMG_SIZE_T ui32ByteSize;
	IMG_SIZE_T ui32Offset;
	IMG_SIZE_T ui32PageSize = HOST_PAGESIZE();
	BM_HANDLE		hBuffer;
	PVRSRV_MEMBLK	*psMemBlock;
	IMG_BOOL		bBMError;
	IMG_UINT32 i;
	PVRSRV_DC_MAPINFO *psDCMapInfo = IMG_NULL;

	if(!hDeviceClassBuffer || !ppsMemInfo || !phOSMapInfo || !hDevMemContext)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: invalid parameters"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	
	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					sizeof(PVRSRV_DC_MAPINFO),
					(IMG_VOID **)&psDCMapInfo, IMG_NULL,
					"PVRSRV_DC_MAPINFO") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for psDCMapInfo"));
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}
	OSMemSet(psDCMapInfo, 0, sizeof(PVRSRV_DC_MAPINFO));

	psDeviceClassBuffer = (PVRSRV_DEVICECLASS_BUFFER*)hDeviceClassBuffer;

	/* query the buffer device (display/buffer class driver) for the
	   buffer's physical addresses, size and CPU virtual address */
	eError = psDeviceClassBuffer->pfnGetBufferAddr(psDeviceClassBuffer->hExtDevice,
												   psDeviceClassBuffer->hExtBuffer,
												   &psSysPAddr,
												   &ui32ByteSize,
												   &pvCPUVAddr,
												   phOSMapInfo,
												   &bPhysContig,
												   &psDCMapInfo->ui32TilingStride);
	if(eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to get buffer address"));
		goto ErrorExitPhase1;
	}

	
	psBMContext = (BM_CONTEXT*)psDeviceClassBuffer->hDevMemContext;
	psDeviceNode = psBMContext->psDeviceNode;
	psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
	psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
	for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
	{
		if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
		{
			if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
			{
				
				hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
			}
			else
			{
				hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
			}
			break;
		}
	}

	if(hDevMemHeap == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: unable to find mapping heap"));
		eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE;
		goto ErrorExitPhase1;
	}

	
	ui32Offset = ((IMG_UINTPTR_T)pvCPUVAddr) & (ui32PageSize - 1);
	pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvCPUVAddr - ui32Offset);

	eError = OSAllocMem(PVRSRV_PAGEABLE_SELECT,
					sizeof(PVRSRV_KERNEL_MEM_INFO),
					(IMG_VOID **)&psMemInfo, IMG_NULL,
					"Kernel Memory Info");
	if(eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: Failed to alloc memory for block"));
		goto ErrorExitPhase1;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));

	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDevMemHeap,
					   ui32ByteSize,
					   ui32Offset,
					   bPhysContig,
					   psSysPAddr,
					   pvPageAlignedCPUVAddr,
					   &psMemInfo->ui32Flags,
					   &hBuffer);

	if (!bBMError)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: BM_Wrap Failed"));
		
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto ErrorExitPhase2;
	}

	
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);

	
	psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;

	

	psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);

	
	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->ui32AllocSize = ui32ByteSize;
	psMemInfo->psKernelSyncInfo = psDeviceClassBuffer->psKernelSyncInfo;

	

	psMemInfo->pvSysBackupBuffer = IMG_NULL;

	
	psDCMapInfo->psMemInfo = psMemInfo;

#if defined(SUPPORT_MEMORY_TILING)
	psDCMapInfo->psDeviceNode = psDeviceNode;

	if(psDCMapInfo->ui32TilingStride > 0)
	{
		
		eError = psDeviceNode->pfnAllocMemTilingRange(psDeviceNode,
														psMemInfo,
														psDCMapInfo->ui32TilingStride,
														&psDCMapInfo->ui32RangeIndex);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVMapDeviceClassMemoryKM: AllocMemTilingRange failed"));
			goto ErrorExitPhase3;
		}
	}
#endif

	
	psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
													RESMAN_TYPE_DEVICECLASSMEM_MAPPING,
													psDCMapInfo,
													0,
													&UnmapDeviceClassMemoryCallBack);

	psMemInfo->ui32RefCount++;

	psMemInfo->memType = PVRSRV_MEMTYPE_DEVICECLASS;

	
	*ppsMemInfo = psMemInfo;

#if defined(SUPPORT_PDUMP_MULTI_PROCESS)

	PDUMPCOMMENT("Dump display surface");
	PDUMPMEM(IMG_NULL, psMemInfo, ui32Offset, psMemInfo->ui32AllocSize, PDUMP_FLAGS_CONTINUOUS, ((BM_BUF*)psMemInfo->sMemBlk.hBuffer)->pMapping);
#endif
	return PVRSRV_OK;

#if defined(SUPPORT_MEMORY_TILING)
ErrorExitPhase3:
	if(psMemInfo)
	{
		FreeDeviceMem(psMemInfo);
		


		psMemInfo = IMG_NULL;
	}
#endif

ErrorExitPhase2:
	if(psMemInfo)
	{
		OSFreeMem(PVRSRV_PAGEABLE_SELECT, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
	}

ErrorExitPhase1:
	if(psDCMapInfo)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_DC_MAPINFO), psDCMapInfo, IMG_NULL);
	}

	return eError;
}
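/*
 * Hedged sketch: the mapping-heap lookup above (and repeated in
 * PVRSRVWrapExtMemoryKM later in this listing) factored into one helper.
 * Per-context heaps are instantiated on demand via BM_CreateHeap, shared
 * heaps reuse the handle already stored in the heap info. The helper name
 * is illustrative and not part of the driver API.
 */
static IMG_HANDLE ExampleFindMappingHeap(IMG_HANDLE hDevMemContext,
										 DEVICE_MEMORY_INFO *psDevMemoryInfo)
{
	DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
	IMG_UINT32 i;

	for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
	{
		if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) != psDevMemoryInfo->ui32MappingHeapID)
		{
			continue;
		}

		if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
		{
			/* per-context heaps are created per mapping context */
			return BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
		}

		return psDeviceMemoryHeap[i].hDevMemHeap;
	}

	return IMG_NULL;
}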
Beispiel #23
/*!
******************************************************************************

 @Function	SysInitialise

 @Description Initialises kernel services at 'driver load' time

 @Return   PVRSRV_ERROR  : PVRSRV_OK on success, otherwise a failure code

******************************************************************************/
PVRSRV_ERROR SysInitialise(IMG_VOID)
{
	IMG_UINT32			i;
	PVRSRV_ERROR 		eError;
	PVRSRV_DEVICE_NODE	*psDeviceNode;
	SGX_TIMING_INFORMATION* psTimingInfo;

	gpsSysData = &gsSysData;
	OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));

	gpsSysData->pvSysSpecificData = &gsSysSpecificData;
	gsSysSpecificData.ui32SysSpecificData = 0;
#if defined(LDM_PCI) || defined(SUPPORT_DRI_DRM)
	/* Save the pci_dev structure pointer from module.c */
	PVR_ASSERT(gpsPVRLDMDev != IMG_NULL);
	gsSysSpecificData.psPCIDev = gpsPVRLDMDev;
#endif

	eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);

	/* Set up timing information*/
	psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
	psTimingInfo->ui32CoreClockSpeed = SYS_SGX_CLOCK_SPEED;
	psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ;
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
	psTimingInfo->bEnableActivePM = IMG_TRUE;
#else
	psTimingInfo->bEnableActivePM = IMG_FALSE;
#endif /* SUPPORT_ACTIVE_POWER_MANAGEMENT */
	psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
	psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ;

#ifdef __linux__
	eError = PCIInitDev(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PCINIT);
#endif

	gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;

	/* init device ID's */
	for(i=0; i<SYS_DEVICE_COUNT; i++)
	{
		gpsSysData->sDeviceID[i].uiID = i;
		gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
	}

	gpsSysData->psDeviceNodeList = IMG_NULL;
	gpsSysData->psQueueList = IMG_NULL;

	eError = SysInitialiseCommon(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}

	/*
		Locate the devices within the system, specifying
		the physical addresses of each devices components
		(regs, mem, ports etc.)
	*/
	eError = SysLocateDevices(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);

	/*
		Map the Atlas and PDP registers.
	*/
	gpsSysData->pvSOCRegsBase = OSMapPhysToLin(gsSOCRegsCpuPBase,
											   SYS_ATLAS_REG_REGION_SIZE,
	                                           PVRSRV_HAP_UNCACHED|PVRSRV_HAP_KERNEL_ONLY,
	                                           IMG_NULL);

	/*
		Do some initial register setup
	*/
	eError = SysInitRegisters();
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitRegisters: Failed to initialise registers"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITREG);

#if defined(READ_TCF_HOST_MEM_SIGNATURE)
	SysTestTCFHostMemSig(gsSGXDeviceMap.sRegsCpuPBase,
						 gsSGXDeviceMap.sLocalMemCpuPBase);
#endif

	/*
		Register devices with the system
		This also sets up their memory maps/heaps
	*/
	eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
								  SysGetSGXInterruptBit(), &gui32SGXDeviceID);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);

	/*
		A given SOC may have 0 to n local device memory blocks.
		It is assumed device local memory blocks will be either
		specific to each device or shared among one or more services
		managed devices.
		Note: no provision is made for a single device having more
		than one local device memory blocks. If required we must
		add new types,
		e.g.PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG0
			PVRSRV_BACKINGSTORE_LOCALMEM_NONCONTIG1
	*/

	/*
		Create Local Device Page Managers for each device memory block.
		We'll use an RA to do the management
		(only one for the emulator)
	*/
	gpsSysData->apsLocalDevMemArena[0] = RA_Create ("TestChipDeviceMemory",
													gsSGXDeviceMap.sLocalMemSysPBase.uiAddr,
													gsSGXDeviceMap.ui32LocalMemSize,
													IMG_NULL,
													HOST_PAGESIZE(),
													IMG_NULL,
													IMG_NULL,
													IMG_NULL,
													IMG_NULL);

	if (gpsSysData->apsLocalDevMemArena[0] == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to create local dev mem allocator!"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_RA_ARENA);

	/*
		Once all devices are registered, specify the backing store
		and, if required, customise the memory heap config
	*/
	psDeviceNode = gpsSysData->psDeviceNodeList;
	while(psDeviceNode)
	{
		/* perform any OEM SOC address space customisations here */
		switch(psDeviceNode->sDevId.eDeviceType)
		{
			case PVRSRV_DEVICE_TYPE_SGX:
			{
				DEVICE_MEMORY_INFO *psDevMemoryInfo;
				DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;

				/* specify the backing store to use for the device's MMU PT/PDs */
				psDeviceNode->psLocalDevMemArena = gpsSysData->apsLocalDevMemArena[0];

				/* useful pointers */
				psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
				psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;

				/* specify the backing store for all SGX heaps */
				for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
				{
					/*
						specify the backing store type
						(all local device mem contig) for test chip
					*/
					psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG;

					/*
						map the device memory allocator(s) onto
						the device memory heaps as required
					*/
					psDeviceMemoryHeap[i].psLocalDevMemArena = gpsSysData->apsLocalDevMemArena[0];

#ifdef OEM_CUSTOMISE
					/* if required, modify the memory config */
#endif
				}
				break;
			}
			default:
				break;
		}

		/* advance to next device */
		psDeviceNode = psDeviceNode->psNext;
	}

	/*
		Initialise all devices 'managed' by services:
	*/
	eError = PVRSRVInitialiseDevice (gui32SGXDeviceID);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
		SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);

	return PVRSRV_OK;
}
Beispiel #24
IMG_EXPORT
PVRSRV_ERROR IMG_CALLCONV PVRSRVWrapExtMemoryKM(IMG_HANDLE				hDevCookie,
												PVRSRV_PER_PROCESS_DATA	*psPerProc,
												IMG_HANDLE				hDevMemContext,
												IMG_SIZE_T 				ui32ByteSize,
												IMG_SIZE_T				ui32PageOffset,
												IMG_BOOL				bPhysContig,
												IMG_SYS_PHYADDR	 		*psExtSysPAddr,
												IMG_VOID 				*pvLinAddr,
												IMG_UINT32				ui32Flags,
												PVRSRV_KERNEL_MEM_INFO	**ppsMemInfo)
{
	PVRSRV_KERNEL_MEM_INFO *psMemInfo = IMG_NULL;
	DEVICE_MEMORY_INFO  *psDevMemoryInfo;
	IMG_SIZE_T			ui32HostPageSize = HOST_PAGESIZE();
	IMG_HANDLE			hDevMemHeap = IMG_NULL;
	PVRSRV_DEVICE_NODE* psDeviceNode;
	BM_HANDLE 			hBuffer;
	PVRSRV_MEMBLK		*psMemBlock;
	IMG_BOOL			bBMError;
	BM_HEAP				*psBMHeap;
	PVRSRV_ERROR		eError;
	IMG_VOID 			*pvPageAlignedCPUVAddr;
	IMG_SYS_PHYADDR	 	*psIntSysPAddr = IMG_NULL;
	IMG_HANDLE			hOSWrapMem = IMG_NULL;
	DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;
	IMG_UINT32		i;
	IMG_SIZE_T		ui32PageCount = 0;
	
	
	psDeviceNode = (PVRSRV_DEVICE_NODE*)hDevCookie;
	PVR_ASSERT(psDeviceNode != IMG_NULL);

	if (psDeviceNode == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "PVRSRVWrapExtMemoryKM: invalid parameter"));
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	if(pvLinAddr)
	{
		
		ui32PageOffset = (IMG_UINTPTR_T)pvLinAddr & (ui32HostPageSize - 1);

		
		ui32PageCount = HOST_PAGEALIGN(ui32ByteSize + ui32PageOffset) / ui32HostPageSize;
		pvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - ui32PageOffset);

		
		if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						ui32PageCount * sizeof(IMG_SYS_PHYADDR),
						(IMG_VOID **)&psIntSysPAddr, IMG_NULL,
						"Array of Page Addresses") != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
			return PVRSRV_ERROR_OUT_OF_MEMORY;
		}

		eError = OSAcquirePhysPageAddr(pvPageAlignedCPUVAddr,
										ui32PageCount * ui32HostPageSize,
										psIntSysPAddr,
										&hOSWrapMem);
		if(eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto ErrorExitPhase1;
		}

		
		psExtSysPAddr = psIntSysPAddr;

		

		bPhysContig = IMG_FALSE;
	}
	else
	{
		/* no linear address supplied: use the caller's psExtSysPAddr
		   list and bPhysContig setting as-is */
	}
	
	
	psDevMemoryInfo = &((BM_CONTEXT*)hDevMemContext)->psDeviceNode->sDevMemoryInfo;
	psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
	for(i=0; i<PVRSRV_MAX_CLIENT_HEAPS; i++)
	{
		if(HEAP_IDX(psDeviceMemoryHeap[i].ui32HeapID) == psDevMemoryInfo->ui32MappingHeapID)
		{
			if(psDeviceMemoryHeap[i].DevMemHeapType == DEVICE_MEMORY_HEAP_PERCONTEXT)
			{
				
				hDevMemHeap = BM_CreateHeap(hDevMemContext, &psDeviceMemoryHeap[i]);
			}
			else
			{
				hDevMemHeap = psDevMemoryInfo->psDeviceMemoryHeap[i].hDevMemHeap;
			}
			break;
		}
	}

	if(hDevMemHeap == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: unable to find mapping heap"));
		eError = PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP;
		goto ErrorExitPhase2;
	}

	if(OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
					sizeof(PVRSRV_KERNEL_MEM_INFO),
					(IMG_VOID **)&psMemInfo, IMG_NULL,
					"Kernel Memory Info") != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: Failed to alloc memory for block"));
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto ErrorExitPhase2;
	}

	OSMemSet(psMemInfo, 0, sizeof(*psMemInfo));
	psMemInfo->ui32Flags = ui32Flags;

	psMemBlock = &(psMemInfo->sMemBlk);

	bBMError = BM_Wrap(hDevMemHeap,
					   ui32ByteSize,
					   ui32PageOffset,
					   bPhysContig,
					   psExtSysPAddr,
					   IMG_NULL,
					   &psMemInfo->ui32Flags,
					   &hBuffer);
	if (!bBMError)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVWrapExtMemoryKM: BM_Wrap Failed"));
		eError = PVRSRV_ERROR_BAD_MAPPING;
		goto ErrorExitPhase3;
	}

	
	psMemBlock->sDevVirtAddr = BM_HandleToDevVaddr(hBuffer);
	psMemBlock->hOSMemHandle = BM_HandleToOSMemHandle(hBuffer);
	psMemBlock->hOSWrapMem = hOSWrapMem;
	psMemBlock->psIntSysPAddr = psIntSysPAddr;

	
	psMemBlock->hBuffer = (IMG_HANDLE)hBuffer;

	
	psMemInfo->pvLinAddrKM = BM_HandleToCpuVaddr(hBuffer);
	psMemInfo->sDevVAddr = psMemBlock->sDevVirtAddr;
	psMemInfo->ui32AllocSize = ui32ByteSize;

	

	psMemInfo->pvSysBackupBuffer = IMG_NULL;

	


	psBMHeap = (BM_HEAP*)hDevMemHeap;
	hDevMemContext = (IMG_HANDLE)psBMHeap->pBMContext;
	eError = PVRSRVAllocSyncInfoKM(hDevCookie,
									hDevMemContext,
									&psMemInfo->psKernelSyncInfo);
	if(eError != PVRSRV_OK)
	{
		goto ErrorExitPhase4;
	}

	psMemInfo->psKernelSyncInfo->ui32RefCount++;

	
	psMemInfo->ui32RefCount++;

	psMemInfo->memType = PVRSRV_MEMTYPE_WRAPPED;

	
	psMemInfo->sMemBlk.hResItem = ResManRegisterRes(psPerProc->hResManContext,
													RESMAN_TYPE_DEVICEMEM_WRAP,
													psMemInfo,
													0,
													&UnwrapExtMemoryCallBack);

	
	*ppsMemInfo = psMemInfo;

	return PVRSRV_OK;

	

ErrorExitPhase4:
	if(psMemInfo)
	{
		FreeDeviceMem(psMemInfo);
		


		psMemInfo = IMG_NULL;
	}

ErrorExitPhase3:
	if(psMemInfo)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(PVRSRV_KERNEL_MEM_INFO), psMemInfo, IMG_NULL);
		
	}

ErrorExitPhase2:
	if(psIntSysPAddr)
	{
		OSReleasePhysPageAddr(hOSWrapMem);
	}

ErrorExitPhase1:
	if(psIntSysPAddr)
	{
		OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, ui32PageCount * sizeof(IMG_SYS_PHYADDR), psIntSysPAddr, IMG_NULL);
		
	}
	
	return eError;
}
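/*
 * Minimal sketch of the address arithmetic PVRSRVWrapExtMemoryKM applies to a
 * user-supplied CPU virtual address above: split it into a page-aligned base
 * plus in-page offset and work out how many host pages the byte range spans.
 * Uses HOST_PAGESIZE()/HOST_PAGEALIGN() from the surrounding headers; the
 * helper name is illustrative only.
 */
static IMG_VOID ExampleSplitLinAddr(IMG_VOID *pvLinAddr,
									IMG_SIZE_T uiByteSize,
									IMG_VOID **ppvPageAlignedCPUVAddr,
									IMG_SIZE_T *puiPageOffset,
									IMG_SIZE_T *puiPageCount)
{
	IMG_SIZE_T uiHostPageSize = HOST_PAGESIZE();

	*puiPageOffset = (IMG_UINTPTR_T)pvLinAddr & (uiHostPageSize - 1);
	*ppvPageAlignedCPUVAddr = (IMG_VOID *)((IMG_UINTPTR_T)pvLinAddr - *puiPageOffset);
	*puiPageCount = HOST_PAGEALIGN(uiByteSize + *puiPageOffset) / uiHostPageSize;
}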
static void MMU_UnmapPagesAndFreePTs(struct MMU_HEAP *psMMUHeap,
			 struct IMG_DEV_VIRTADDR sDevVAddr,
			 u32 ui32PageCount, void *hUniqueTag)
{
	u32 uPageSize = HOST_PAGESIZE();
	struct IMG_DEV_VIRTADDR sTmpDevVAddr;
	u32 i;
	u32 ui32PDIndex;
	u32 ui32PTIndex;
	u32 *pui32Tmp;
	IMG_BOOL bInvalidateDirectoryCache = IMG_FALSE;

#if !defined(PDUMP)
	PVR_UNREFERENCED_PARAMETER(hUniqueTag);
#endif

	sTmpDevVAddr = sDevVAddr;

	for (i = 0; i < ui32PageCount; i++) {
		struct MMU_PT_INFO **ppsPTInfoList;

		ui32PDIndex =
		    sTmpDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT +
					    SGX_MMU_PT_SHIFT);

		ppsPTInfoList =
		    &psMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

		{
			ui32PTIndex = (sTmpDevVAddr.uiAddr & SGX_MMU_PT_MASK)
						>> SGX_MMU_PAGE_SHIFT;

			if (!ppsPTInfoList[0]) {
				PVR_DPF(PVR_DBG_MESSAGE,
					"MMU_UnmapPagesAndFreePTs: "
					"Invalid PT for alloc at VAddr:0x%08lX "
					"(VaddrIni:0x%08lX AllocPage:%u) "
					"PDIdx:%u PTIdx:%u",
					 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
					 i, ui32PDIndex, ui32PTIndex);

				sTmpDevVAddr.uiAddr += uPageSize;

				continue;
			}

			pui32Tmp = (u32 *)ppsPTInfoList[0]->PTPageCpuVAddr;

			if (!pui32Tmp)
				continue;

			if (pui32Tmp[ui32PTIndex] & SGX_MMU_PTE_VALID) {
				ppsPTInfoList[0]->ui32ValidPTECount--;
			} else {
				PVR_DPF(PVR_DBG_MESSAGE,
					 "MMU_UnmapPagesAndFreePTs: "
					 "Page is already invalid for alloc at "
					 "VAddr:0x%08lX "
					 "(VAddrIni:0x%08lX AllocPage:%u) "
					 "PDIdx:%u PTIdx:%u",
					 sTmpDevVAddr.uiAddr, sDevVAddr.uiAddr,
					 i, ui32PDIndex, ui32PTIndex);
			}

			PVR_ASSERT((s32)ppsPTInfoList[0]->ui32ValidPTECount >=
									0);
			pui32Tmp[ui32PTIndex] = 0;
		}

		if (ppsPTInfoList[0]
		    && ppsPTInfoList[0]->ui32ValidPTECount == 0) {
			_DeferredFreePageTable(psMMUHeap,
					       ui32PDIndex - (psMMUHeap->
						   ui32PTBaseIndex >>
						       SGX_MMU_PT_SHIFT));
			bInvalidateDirectoryCache = IMG_TRUE;
		}

		sTmpDevVAddr.uiAddr += uPageSize;
	}