Exemplo n.º 1
0
/*
 * AllocMemory
 *
 * Allocate device-virtual address space (and optionally RAM backing) for a
 * buffer. Two modes, selected by uFlags:
 *
 *  - PVRSRV_MEM_RAM_BACKED_ALLOCATION set: the buffer is sub-allocated out
 *    of the heap's import arena (RA_Alloc), which supplies an existing
 *    BM_MAPPING with physical backing already attached.
 *
 *  - flag clear: only device-virtual space is allocated via the device's
 *    pfnMMUAlloc callback, and a fresh, unbacked BM_MAPPING is created.
 *
 * pBMContext          buffer-manager context owning the device node
 * psBMHeap            heap to allocate from
 * psDevVAddr          caller-supplied device virtual address; only consulted
 *                     when PVRSRV_MEM_USER_SUPPLIED_DEVVADDR is set
 * uSize               requested size in bytes
 * uFlags              PVRSRV_MEM_* allocation flags
 * uDevVAddrAlignment  required device-virtual alignment
 * pBuf                buffer descriptor filled in on success
 *
 * Returns IMG_TRUE on success, IMG_FALSE on failure.
 */
static IMG_BOOL
AllocMemory (BM_CONTEXT				*pBMContext,
				BM_HEAP				*psBMHeap,
				IMG_DEV_VIRTADDR	*psDevVAddr,
				IMG_SIZE_T			uSize,
				IMG_UINT32			uFlags,
				IMG_UINT32			uDevVAddrAlignment,
				BM_BUF				*pBuf)
{
	BM_MAPPING			*pMapping;
	IMG_UINTPTR_T		uOffset;
	RA_ARENA			*pArena = IMG_NULL;

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "AllocMemory (uSize=0x%x, uFlags=0x%x, align=0x%x)",
			  uSize, uFlags, uDevVAddrAlignment));

	

	/* RAM-backed path: sub-allocate from the heap's import arena. */
	if(uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION)
	{
		if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
		{
			/* A user-chosen device address cannot be honoured by the
			   arena allocator, so reject the combination outright. */
			PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: combination of DevVAddr management and RAM backing mode unsupported"));
			return IMG_FALSE;
		}

		

		/* The heap must use one of the backing-store types that the
		   import arena can service. */
		if(psBMHeap->ui32Attribs
		   &	(PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG
		   |PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG))
		{
			
			pArena = psBMHeap->pImportArena;
			PVR_ASSERT(psBMHeap->sDevArena.psDeviceMemoryHeapInfo->ui32Attribs & PVRSRV_MEM_RAM_BACKED_ALLOCATION);
		}
		else
		{
			PVR_DPF ((PVR_DBG_ERROR, "AllocMemory: backing store type doesn't match heap"));
			return IMG_FALSE;
		}

		/* RA_Alloc returns both the device address and (via the priv
		   pointer) the BM_MAPPING of the import that contains it. */
		if (!RA_Alloc(pArena,
					  uSize,
					  IMG_NULL,
					  (IMG_VOID*) &pMapping,
					  uFlags,
					  uDevVAddrAlignment,
					  0,
					  (IMG_UINTPTR_T *)&(pBuf->DevVAddr.uiAddr)))
		{
			PVR_DPF((PVR_DBG_ERROR, "AllocMemory: RA_Alloc(0x%x) FAILED", uSize));
			return IMG_FALSE;
		}

		/* Offset of this buffer within the containing import mapping. */
		uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
		if(pMapping->CpuVAddr)
		{
			pBuf->CpuVAddr = (IMG_VOID*) ((IMG_UINTPTR_T)pMapping->CpuVAddr + uOffset);
		}
		else
		{
			pBuf->CpuVAddr = IMG_NULL;
		}

		/* Share the mapping's OS handle when the buffer spans the whole
		   import; otherwise create a sub-handle for [uOffset, uOffset+uSize). */
		if(uSize == pMapping->uSize)
		{
			pBuf->hOSMemHandle = pMapping->hOSMemHandle;
		}
		else
		{
			if(OSGetSubMemHandle(pMapping->hOSMemHandle,
								 uOffset,
								 uSize,
								 psBMHeap->ui32Attribs,
								 &pBuf->hOSMemHandle)!=PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSGetSubMemHandle FAILED"));
				return IMG_FALSE;
			}
		}

		/* CPU physical address is the mapping's base plus the offset. */
		pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;

		if(uFlags & PVRSRV_MEM_ZERO)
		{
			if(!ZeroBuf(pBuf, pMapping, uSize, psBMHeap->ui32Attribs | uFlags))
			{
				return IMG_FALSE;
			}
		}
	}
	else
	{
		/* Non-RAM-backed path: allocate device-virtual space only. */
		if(uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR)
		{
			
			PVR_ASSERT(psDevVAddr != IMG_NULL);

			if (psDevVAddr == IMG_NULL)
			{
				PVR_DPF((PVR_DBG_ERROR, "AllocMemory: invalid parameter - psDevVAddr"));
				return IMG_FALSE;
			}

			/* Reserve the caller-specified device address range.
			   NOTE(review): the pfnMMUAlloc return value is not checked
			   on this path — confirm whether it can fail here. */
			pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
													uSize,
													IMG_NULL,
													PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
													uDevVAddrAlignment,
													psDevVAddr);

			
			pBuf->DevVAddr = *psDevVAddr;
		}
		else
		{
			

			/* Let the MMU heap pick the device-virtual address. */
			pBMContext->psDeviceNode->pfnMMUAlloc (psBMHeap->pMMUHeap,
													uSize,
													IMG_NULL,
													0,
													uDevVAddrAlignment,
													&pBuf->DevVAddr);
		}

		/* Create a fresh, unbacked mapping to record the allocation. */
		if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
							sizeof (struct _BM_MAPPING_),
							(IMG_PVOID *)&pMapping, IMG_NULL,
							"Buffer Manager Mapping") != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR, "AllocMemory: OSAllocMem(0x%x) FAILED", sizeof(*pMapping)));
			return IMG_FALSE;
		}

		/* No CPU-side presence until backing is attached later. */
		pBuf->CpuVAddr = IMG_NULL;
		pBuf->hOSMemHandle = 0;
		pBuf->CpuPAddr.uiAddr = 0;

		
		pMapping->CpuVAddr = IMG_NULL;
		pMapping->CpuPAddr.uiAddr = 0;
		pMapping->DevVAddr = pBuf->DevVAddr;
		pMapping->psSysAddr = IMG_NULL;
		pMapping->uSize = uSize;
		pMapping->hOSMemHandle = 0;
	}

	/* pArena is IMG_NULL on the non-RAM-backed path. */
	pMapping->pArena = pArena;

	/* Link buffer <-> mapping <-> heap. */
	pMapping->pBMHeap = psBMHeap;
	pBuf->pMapping = pMapping;

	
	PVR_DPF ((PVR_DBG_MESSAGE,
				"AllocMemory: pMapping=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
				(IMG_UINTPTR_T)pMapping,
				pMapping->DevVAddr.uiAddr,
				(IMG_UINTPTR_T)pMapping->CpuVAddr,
				pMapping->CpuPAddr.uiAddr,
				pMapping->uSize));

	PVR_DPF ((PVR_DBG_MESSAGE,
				"AllocMemory: pBuf=%08x: DevV=%08X CpuV=%08x CpuP=%08X uSize=0x%x",
				(IMG_UINTPTR_T)pBuf,
				pBuf->DevVAddr.uiAddr,
				(IMG_UINTPTR_T)pBuf->CpuVAddr,
				pBuf->CpuPAddr.uiAddr,
				uSize));

	/* Result must honour the requested alignment (alignment is assumed
	   to be a power of two by this mask test). */
	PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);

	return IMG_TRUE;
}
/*
	Map an import to the device.

	Takes a device-mapping reference on psImport. On the first reference the
	import is acquired, device-virtual space is allocated from the heap's
	quantized VM arena, page tables are reserved, and (if bMap) the PMR is
	actually mapped. Subsequent calls only validate that the same heap is
	used and bump the refcount.

	Returns PVRSRV_OK on success; on failure all state taken by this call
	(VM space, reservation, import acquire, heap import count, and the
	device-mapping refcount itself) is rolled back before returning.
*/
IMG_INTERNAL
PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
									   IMG_BOOL bMap,
									   DEVMEM_IMPORT *psImport)
{
	DEVMEM_DEVICE_IMPORT *psDeviceImport;
	IMG_BOOL bStatus;
	RA_BASE_T uiAllocatedAddr;
	RA_LENGTH_T uiAllocatedSize;
	IMG_DEV_VIRTADDR sBase;
	IMG_HANDLE hReservation;
	PVRSRV_ERROR eError;

	psDeviceImport = &psImport->sDeviceImport;

	OSLockAcquire(psDeviceImport->hLock);
	DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
					__FUNCTION__,
					psImport,
					psDeviceImport->ui32RefCount,
					psDeviceImport->ui32RefCount+1);

	if (psDeviceImport->ui32RefCount++ == 0)
	{
		/* First device mapping of this import: do the real work. */
		_DevmemImportStructAcquire(psImport);

		OSLockAcquire(psHeap->hLock);
		psHeap->uiImportCount++;
		OSLockRelease(psHeap->hLock);

		if (psHeap->psCtx->hBridge != psImport->hBridge)
		{
			/*
				The import was done with a different connection then the
				memory context which means they are not compatible.
			*/
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto failCheck;
		}

		/* Allocate space in the VM */
		bStatus = RA_Alloc(psHeap->psQuantizedVMRA,
						   psImport->uiSize,
						   0, /* flags: this RA doesn't use flags*/
						   psImport->uiAlign,
						   &uiAllocatedAddr,
						   &uiAllocatedSize,
						   IMG_NULL /* don't care about per-import priv data */
						   );
		if (!bStatus)
		{
			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
			goto failVMRAAlloc;
		}

		/* No reason for the allocated virtual size to be different from
		   the PMR's size */
		PVR_ASSERT(uiAllocatedSize == psImport->uiSize);

		sBase.uiAddr = uiAllocatedAddr;

		/* Setup page tables for the allocated VM space */
		eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hBridge,
											 psHeap->hDevMemServerHeap,
											 sBase,
											 uiAllocatedSize,
											 &hReservation);
		if (eError != PVRSRV_OK)
		{
			goto failReserve;
		}

		if (bMap)
		{
			DEVMEM_FLAGS_T uiMapFlags;

			uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;

			/* Actually map the PMR to allocated VM space */
			eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hBridge,
										   psHeap->hDevMemServerHeap,
										   hReservation,
										   psImport->hPMR,
										   uiMapFlags,
										   &psDeviceImport->hMapping);
			if (eError != PVRSRV_OK)
			{
				goto failMap;
			}
			psDeviceImport->bMapped = IMG_TRUE;
		}

		/* Setup device mapping specific parts of the mapping info */
		psDeviceImport->hReservation = hReservation;
		psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
		psDeviceImport->psHeap = psHeap;
	}
	else
	{
		/*
			Check that we've been asked to map it into the
			same heap 2nd time around
		*/
		if (psHeap != psDeviceImport->psHeap)
		{
			eError = PVRSRV_ERROR_INVALID_HEAP;
			goto failParams;
		}
	}
	OSLockRelease(psDeviceImport->hLock);

	return PVRSRV_OK;

failMap:
	BridgeDevmemIntUnreserveRange(psHeap->psCtx->hBridge,
								  hReservation);
failReserve:
	RA_Free(psHeap->psQuantizedVMRA,
			uiAllocatedAddr);
failVMRAAlloc:
failCheck:
	_DevmemImportStructRelease(psImport);
	OSLockAcquire(psHeap->hLock);
	psHeap->uiImportCount--;
	OSLockRelease(psHeap->hLock);
failParams:
	/* Roll back the reference taken unconditionally at the top of the
	   function. Without this, a failed map left ui32RefCount at 1, so the
	   next map attempt would take the "2nd time around" branch and compare
	   against an uninitialised psDeviceImport->psHeap. */
	psDeviceImport->ui32RefCount--;
	OSLockRelease(psDeviceImport->hLock);
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
Exemplo n.º 3
0
/*
 * BM_ImportMemory
 *
 * Import (allocate) physical backing for a buffer-manager heap and map it
 * into the device. Used as the import callback of the heap's arena.
 *
 * pH                 heap handle (a BM_HEAP*)
 * uRequestSize       requested size in bytes; rounded up to a host page
 * pActualSize        if non-NULL, receives the page-aligned size actually used
 * ppsMapping         receives the newly created BM_MAPPING on success
 * uFlags             PVRSRV_MEM_* / PVRSRV_HAP_* flags for the import
 * pBase              receives the device virtual base address on success
 *
 * Backing comes from one of two stores, chosen by the heap attributes:
 * non-contiguous system pages (OSAllocPages) or contiguous local device
 * memory (RA_Alloc from the local arena + OSReservePhys).
 *
 * Returns IMG_TRUE on success, IMG_FALSE on failure (all partially
 * acquired resources are released via the fail_* unwind labels).
 */
static IMG_BOOL
BM_ImportMemory (IMG_VOID *pH,
			  IMG_SIZE_T uRequestSize,
			  IMG_SIZE_T *pActualSize,
			  BM_MAPPING **ppsMapping,
			  IMG_UINT32 uFlags,
			  IMG_UINTPTR_T *pBase)
{
	BM_MAPPING *pMapping;
	BM_HEAP *pBMHeap = pH;
	BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
	IMG_BOOL bResult;
	IMG_SIZE_T uSize;
	IMG_SIZE_T uPSize;
	IMG_UINT32 uDevVAddrAlignment = 0;

	PVR_DPF ((PVR_DBG_MESSAGE,
			  "BM_ImportMemory (pBMContext=0x%x, uRequestSize=0x%x, uFlags=0x%x, uAlign=0x%x)",
			  (IMG_UINTPTR_T)pBMContext, uRequestSize, uFlags, uDevVAddrAlignment));

	PVR_ASSERT (ppsMapping != IMG_NULL);
	PVR_ASSERT (pBMContext != IMG_NULL);

	if (ppsMapping == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter"));
		goto fail_exit;
	}

	/* Imports are always whole host pages. */
	uSize = HOST_PAGEALIGN (uRequestSize);
	PVR_ASSERT (uSize >= uRequestSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
						sizeof (BM_MAPPING),
						(IMG_PVOID *)&pMapping, IMG_NULL,
						"Buffer Manager Mapping") != PVRSRV_OK)
	{
		PVR_DPF ((PVR_DBG_ERROR, "BM_ImportMemory: failed BM_MAPPING alloc"));
		goto fail_exit;
	}

	/* Initialise the mapping before acquiring any backing so the unwind
	   path can rely on these fields being valid. */
	pMapping->hOSMemHandle = 0;
	pMapping->CpuVAddr = 0;
	pMapping->DevVAddr.uiAddr = 0;
	pMapping->CpuPAddr.uiAddr = 0;
	pMapping->uSize = uSize;
	pMapping->pBMHeap = pBMHeap;
	pMapping->ui32Flags = uFlags;

	
	if (pActualSize)
	{
		*pActualSize = uSize;
	}

	/* A "dummy" allocation is backed by a single data page regardless of
	   the requested size. */
	if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
	{
		uPSize = pBMHeap->sDevArena.ui32DataPageSize;
	}
	else
	{
		uPSize = pMapping->uSize;
	}

	

	if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
	{
		IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

		/* Per-allocation cache type overrides the heap default. */
		if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
		{
			ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
			ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
		}

		/* Non-contiguous system memory: allocate OS pages directly. */
		if (OSAllocPages(ui32Attribs,
						 uPSize,
						 pBMHeap->sDevArena.ui32DataPageSize,
						 (IMG_VOID **)&pMapping->CpuVAddr,
						 &pMapping->hOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,
					"BM_ImportMemory: OSAllocPages(0x%x) failed",
					uPSize));
			goto fail_mapping_alloc;
		}

		
		pMapping->eCpuMemoryOrigin = hm_env;
	}
	else if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)
	{
		IMG_SYS_PHYADDR sSysPAddr;
		IMG_UINT32 ui32Attribs = pBMHeap->ui32Attribs;

		
		PVR_ASSERT(pBMHeap->pLocalDevMemArena != IMG_NULL);

		/* Per-allocation cache type overrides the heap default. */
		if (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK)
		{
			ui32Attribs &= ~PVRSRV_HAP_CACHETYPE_MASK;
			ui32Attribs |= (pMapping->ui32Flags & PVRSRV_HAP_CACHETYPE_MASK);
		}

		/* Contiguous local device memory: carve physical space from the
		   local arena, then map it into the CPU. */
		if (!RA_Alloc (pBMHeap->pLocalDevMemArena,
					   uPSize,
					   IMG_NULL,
					   IMG_NULL,
					   0,
					   pBMHeap->sDevArena.ui32DataPageSize,
					   0,
					   (IMG_UINTPTR_T *)&sSysPAddr.uiAddr))
		{
			PVR_DPF((PVR_DBG_ERROR, "BM_ImportMemory: RA_Alloc(0x%x) FAILED", uPSize));
			goto fail_mapping_alloc;
		}

		
		pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		if(OSReservePhys(pMapping->CpuPAddr,
						 uPSize,
						 ui32Attribs,
						 &pMapping->CpuVAddr,
						 &pMapping->hOSMemHandle) != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,	"BM_ImportMemory: OSReservePhys failed"));
			goto fail_dev_mem_alloc;
		}

		
		pMapping->eCpuMemoryOrigin = hm_contiguous;
	}
	else
	{
		PVR_DPF((PVR_DBG_ERROR,	"BM_ImportMemory: Invalid backing store type"));
		goto fail_mapping_alloc;
	}

	/* Map the backing into the device's virtual address space. */
	bResult = DevMemoryAlloc (pBMContext,
								pMapping,
								IMG_NULL,
								uFlags,
								uDevVAddrAlignment,
								&pMapping->DevVAddr);
	if (!bResult)
	{
		PVR_DPF((PVR_DBG_ERROR,
				"BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
				pMapping->uSize));
		goto fail_dev_mem_alloc;
	}

	
	
	PVR_ASSERT (uDevVAddrAlignment>1?(pMapping->DevVAddr.uiAddr%uDevVAddrAlignment)==0:1);

	*pBase = pMapping->DevVAddr.uiAddr;
	*ppsMapping = pMapping;

	PVR_DPF ((PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE"));
	return IMG_TRUE;

fail_dev_mem_alloc:
	/* Release whichever backing store was acquired above. */
	if (pMapping && (pMapping->CpuVAddr || pMapping->hOSMemHandle))
	{
		/* DevMemoryAlloc may have doubled uSize for interleaved
		   allocations; undo that before freeing. */
		if(pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
		{
			pMapping->uSize /= 2;
		}

		if(pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		{
			uPSize = pBMHeap->sDevArena.ui32DataPageSize;
		}
		else
		{
			uPSize = pMapping->uSize;
		}

		if(pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG)
		{
			OSFreePages(pBMHeap->ui32Attribs,
						  uPSize,
						  (IMG_VOID *)pMapping->CpuVAddr,
						  pMapping->hOSMemHandle);
		}
		else
		{
			IMG_SYS_PHYADDR sSysPAddr;

			if(pMapping->CpuVAddr)
			{
				OSUnReservePhys(pMapping->CpuVAddr,
								uPSize,
								pBMHeap->ui32Attribs,
								pMapping->hOSMemHandle);
			}
			sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
			RA_Free (pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr, IMG_FALSE);
		}
	}
fail_mapping_alloc:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(BM_MAPPING), pMapping, IMG_NULL);
	
fail_exit:
	return IMG_FALSE;
}
Exemplo n.º 4
0
/*
 * MMU_Initialise
 *
 * Create an MMU context for a device: allocate and zero one page for the
 * page directory (PD), record its CPU/device addresses, and link the new
 * context into the device's MMU context list.
 *
 * The PD page comes from one of two places:
 *  - no local device memory arena: ordinary OS pages (OSAllocPages);
 *  - local arena present: physical space from RA_Alloc, mapped to the CPU
 *    with OSMapPhysToLin.
 *
 * psDeviceNode   device to create the context for
 * ppsMMUContext  receives the new MMU context on success
 * psPDDevPAddr   receives the device physical address of the PD page
 *
 * Returns PVRSRV_OK on success, PVRSRV_ERROR_GENERIC on any failure
 * (resources unwound via err1/err2/err3).
 */
enum PVRSRV_ERROR MMU_Initialise(struct PVRSRV_DEVICE_NODE *psDeviceNode,
			    struct MMU_CONTEXT **ppsMMUContext,
			    struct IMG_DEV_PHYADDR *psPDDevPAddr)
{
	u32 *pui32Tmp;
	u32 i;
	void *pvPDCpuVAddr;
	struct IMG_DEV_PHYADDR sPDDevPAddr;
	struct IMG_CPU_PHYADDR sCpuPAddr;
	struct IMG_SYS_PHYADDR sSysPAddr;
	struct MMU_CONTEXT *psMMUContext;
	void *hPDOSMemHandle;
	struct SYS_DATA *psSysData;
	struct PVRSRV_SGXDEV_INFO *psDevInfo;

	PVR_DPF(PVR_DBG_MESSAGE, "MMU_Initialise");

	if (SysAcquireData(&psSysData) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Initialise: ERROR call to SysAcquireData failed");
		return PVRSRV_ERROR_GENERIC;
	}

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
		   sizeof(struct MMU_CONTEXT), (void **) &psMMUContext, NULL)
			!= PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Initialise: ERROR call to OSAllocMem failed");
		return PVRSRV_ERROR_GENERIC;
	}
	OSMemSet(psMMUContext, 0, sizeof(struct MMU_CONTEXT));

	psDevInfo = (struct PVRSRV_SGXDEV_INFO *)psDeviceNode->pvDevice;
	psMMUContext->psDevInfo = psDevInfo;

	psMMUContext->psDeviceNode = psDeviceNode;

	/* Allocate one page for the page directory. */
	if (psDeviceNode->psLocalDevMemArena == NULL) {
		/* System-memory backing: plain OS page allocation. */
		if (OSAllocPages
		    (PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
		     SGX_MMU_PAGE_SIZE, SGX_MMU_PAGE_SIZE, &pvPDCpuVAddr,
		     &hPDOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
					"ERROR call to OSAllocPages failed");
			goto err1;
		}

		/* Derive the CPU physical address either from the linear
		   address or from the OS handle. */
		if (pvPDCpuVAddr)
			sCpuPAddr = OSMapLinToCPUPhys(pvPDCpuVAddr);
		else
			sCpuPAddr = OSMemHandleToCpuPAddr(hPDOSMemHandle, 0);
		sPDDevPAddr =
		    SysCpuPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
	} else {
		/* Local-device-memory backing: carve a page from the local
		   arena and map it into the kernel. */
		if (RA_Alloc(psDeviceNode->psLocalDevMemArena,
			     SGX_MMU_PAGE_SIZE, NULL, 0, SGX_MMU_PAGE_SIZE,
			     &(sSysPAddr.uiAddr)) != IMG_TRUE) {
			PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
					"ERROR call to RA_Alloc failed");

			goto err1;
		}

		sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		sPDDevPAddr =
		    SysSysPAddrToDevPAddr(PVRSRV_DEVICE_TYPE_SGX, sSysPAddr);
		pvPDCpuVAddr = (void __force *)
		    OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
				   PVRSRV_HAP_WRITECOMBINE |
				   PVRSRV_HAP_KERNEL_ONLY, &hPDOSMemHandle);
		if (!pvPDCpuVAddr) {
			PVR_DPF(PVR_DBG_ERROR, "MMU_Initialise: "
					"ERROR failed to map page tables");

			goto err2;
		}
	}

	PDUMPCOMMENT("Alloc page directory");

	PDUMPMALLOCPAGETABLE(pvPDCpuVAddr, PDUMP_PD_UNIQUETAG);

	if (pvPDCpuVAddr) {
		pui32Tmp = (u32 *) pvPDCpuVAddr;
	} else {
		PVR_DPF(PVR_DBG_ERROR,
			 "MMU_Initialise: pvPDCpuVAddr invalid");
		goto err3;
	}

	/* Clear the whole page directory: no page tables mapped yet. */
	for (i = 0; i < SGX_MMU_PD_SIZE; i++)
		pui32Tmp[i] = 0;

	PDUMPCOMMENT("Page directory contents");
	PDUMPPAGETABLE(pvPDCpuVAddr, SGX_MMU_PAGE_SIZE, IMG_TRUE,
		       PDUMP_PD_UNIQUETAG, PDUMP_PT_UNIQUETAG);

	psMMUContext->pvPDCpuVAddr = pvPDCpuVAddr;
	psMMUContext->sPDDevPAddr = sPDDevPAddr;
	psMMUContext->hPDOSMemHandle = hPDOSMemHandle;

	*ppsMMUContext = psMMUContext;

	*psPDDevPAddr = sPDDevPAddr;

	/* Push the new context onto the head of the device's context list. */
	psMMUContext->psNext = (struct MMU_CONTEXT *)
						psDevInfo->pvMMUContextList;
	psDevInfo->pvMMUContextList = (void *) psMMUContext;


	return PVRSRV_OK;
err3:
	/* Unwind mirrors the allocation: unmap only when the local-arena
	   path mapped the page... */
	if (psDeviceNode->psLocalDevMemArena)
		OSUnMapPhysToLin((void __iomem __force *)pvPDCpuVAddr,
				 SGX_MMU_PAGE_SIZE, PVRSRV_HAP_WRITECOMBINE |
					PVRSRV_HAP_KERNEL_ONLY,
				 hPDOSMemHandle);
err2:
	/* ...then free the page back to whichever store it came from. */
	if (!psDeviceNode->psLocalDevMemArena)
		OSFreePages(PVRSRV_HAP_WRITECOMBINE | PVRSRV_HAP_KERNEL_ONLY,
			    SGX_MMU_PAGE_SIZE, pvPDCpuVAddr, hPDOSMemHandle);
	else
		RA_Free(psDeviceNode->psLocalDevMemArena,
			sSysPAddr.uiAddr, IMG_FALSE);
err1:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct MMU_CONTEXT),
		  psMMUContext, NULL);

	return PVRSRV_ERROR_GENERIC;
}
Exemplo n.º 5
0
/*
 * _DeferredAllocPagetables
 *
 * Ensure page tables exist for the device-virtual range
 * [DevVAddr, DevVAddr + ui32Size). For each page-directory slot the range
 * touches, allocate an MMU_PT_INFO tracking struct and a physical page for
 * the page table (if not already present), zero it, and write the PD entry.
 *
 * For SHARED/SHARED_EXPORTED heaps the new PD entry is propagated into
 * every MMU context on the device's context list; PERCONTEXT/KERNEL heaps
 * update only the current context.
 *
 * pMMUHeap   heap whose MMU context is being populated
 * DevVAddr   start of the device-virtual range
 * ui32Size   size of the range in bytes
 *
 * Returns IMG_TRUE on success, IMG_FALSE on failure.
 * NOTE(review): on failure, structs/pages allocated in earlier loop
 * iterations are left in place (tracked in apsPTInfoList) rather than
 * unwound — presumably reclaimed by the heap teardown path; confirm.
 */
static IMG_BOOL _DeferredAllocPagetables(struct MMU_HEAP *pMMUHeap,
				struct IMG_DEV_VIRTADDR DevVAddr, u32 ui32Size)
{
	u32 ui32PTPageCount;
	u32 ui32PDIndex;
	u32 i;
	u32 *pui32PDEntry;
	struct MMU_PT_INFO **ppsPTInfoList;
	struct SYS_DATA *psSysData;
	struct IMG_DEV_VIRTADDR sHighDevVAddr;

	PVR_ASSERT(DevVAddr.uiAddr < (1 << SGX_FEATURE_ADDRESS_SPACE_SIZE));

	if (SysAcquireData(&psSysData) != PVRSRV_OK)
		return IMG_FALSE;

	/* First PD slot covered by the range: each PD entry spans
	   2^(PAGE_SHIFT + PT_SHIFT) bytes of device-virtual space. */
	ui32PDIndex =
	    DevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

	/* Compute the (rounded-up) end of the range, clamping at the top of
	   the 32-bit address space to avoid overflow. */
	if ((UINT32_MAX_VALUE - DevVAddr.uiAddr) <
	    (ui32Size + (1 << (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT)) - 1)) {

		sHighDevVAddr.uiAddr = UINT32_MAX_VALUE;
	} else {
		sHighDevVAddr.uiAddr = DevVAddr.uiAddr + ui32Size +
					(1 << (SGX_MMU_PAGE_SHIFT +
					       SGX_MMU_PT_SHIFT)) - 1;
	}

	/* Number of PD slots (page tables) the range touches. */
	ui32PTPageCount =
	    sHighDevVAddr.uiAddr >> (SGX_MMU_PAGE_SHIFT + SGX_MMU_PT_SHIFT);

	ui32PTPageCount -= ui32PDIndex;

	pui32PDEntry = (u32 *) pMMUHeap->psMMUContext->pvPDCpuVAddr;
	pui32PDEntry += ui32PDIndex;

	ppsPTInfoList = &pMMUHeap->psMMUContext->apsPTInfoList[ui32PDIndex];

	PDUMPCOMMENT("Alloc page table (page count == %08X)", ui32PTPageCount);
	PDUMPCOMMENT("Page directory mods (page count == %08X)",
		     ui32PTPageCount);

	for (i = 0; i < ui32PTPageCount; i++) {
		/* Lazily create the tracking struct for this PD slot. */
		if (ppsPTInfoList[i] == NULL) {
			if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
				   sizeof(struct MMU_PT_INFO),
				   (void **) &ppsPTInfoList[i], NULL)
					!= PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR,
					"_DeferredAllocPagetables: "
					"ERROR call to OSAllocMem failed");
				return IMG_FALSE;
			}
			OSMemSet(ppsPTInfoList[i], 0,
				 sizeof(struct MMU_PT_INFO));
		}

		/* Allocate the physical page-table page if this slot has
		   no backing yet. */
		if (ppsPTInfoList[i]->hPTPageOSMemHandle == NULL &&
		    ppsPTInfoList[i]->PTPageCpuVAddr == NULL) {
			struct IMG_CPU_PHYADDR sCpuPAddr;
			struct IMG_DEV_PHYADDR sDevPAddr;

			/* No backing implies the PD entry must be clear. */
			PVR_ASSERT(pui32PDEntry[i] == 0);

			if (pMMUHeap->psDevArena->psDeviceMemoryHeapInfo->
			    psLocalDevMemArena == NULL) {
				/* System-memory backing: plain OS pages. */
				if (OSAllocPages(PVRSRV_HAP_WRITECOMBINE |
						     PVRSRV_HAP_KERNEL_ONLY,
					     SGX_MMU_PAGE_SIZE,
					     SGX_MMU_PAGE_SIZE,
					     (void **)&ppsPTInfoList[i]->
						PTPageCpuVAddr,
					     &ppsPTInfoList[i]->
						hPTPageOSMemHandle) !=
				    PVRSRV_OK) {
					PVR_DPF(PVR_DBG_ERROR,
					   "_DeferredAllocPagetables: "
					   "ERROR call to OSAllocPages failed");
					return IMG_FALSE;
				}

				/* CPU physical from linear address, or from
				   the OS handle if no linear mapping. */
				if (ppsPTInfoList[i]->PTPageCpuVAddr) {
					sCpuPAddr =
					    OSMapLinToCPUPhys(ppsPTInfoList[i]->
							      PTPageCpuVAddr);
				} else {
					sCpuPAddr =
					    OSMemHandleToCpuPAddr(
						ppsPTInfoList[i]->
							  hPTPageOSMemHandle,
						0);
				}
				sDevPAddr =
				    SysCpuPAddrToDevPAddr
					    (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);
			} else {
				/* Local-device-memory backing: carve a page
				   from the local arena and map it in. */
				struct IMG_SYS_PHYADDR sSysPAddr;

				if (RA_Alloc(pMMUHeap->psDevArena->
				     psDeviceMemoryHeapInfo->psLocalDevMemArena,
				     SGX_MMU_PAGE_SIZE, NULL, 0,
				     SGX_MMU_PAGE_SIZE,
				     &(sSysPAddr.uiAddr)) != IMG_TRUE) {
					PVR_DPF(PVR_DBG_ERROR,
					       "_DeferredAllocPagetables: "
					       "ERROR call to RA_Alloc failed");
					return IMG_FALSE;
				}

				sCpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
				ppsPTInfoList[i]->PTPageCpuVAddr =
				    (void __force *)
				    OSMapPhysToLin(sCpuPAddr, SGX_MMU_PAGE_SIZE,
						   PVRSRV_HAP_WRITECOMBINE |
						   PVRSRV_HAP_KERNEL_ONLY,
						   &ppsPTInfoList[i]->
						   hPTPageOSMemHandle);
				if (!ppsPTInfoList[i]->PTPageCpuVAddr) {
					PVR_DPF(PVR_DBG_ERROR,
					     "_DeferredAllocPagetables: "
					     "ERROR failed to map page tables");
					return IMG_FALSE;
				}

				sDevPAddr = SysCpuPAddrToDevPAddr
					    (PVRSRV_DEVICE_TYPE_SGX, sCpuPAddr);

			}

			/* Fresh page table starts with all entries invalid. */
			OSMemSet(ppsPTInfoList[i]->PTPageCpuVAddr, 0,
				 SGX_MMU_PAGE_SIZE);

			PDUMPMALLOCPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
					     PDUMP_PT_UNIQUETAG);

			PDUMPPAGETABLE(ppsPTInfoList[i]->PTPageCpuVAddr,
				       SGX_MMU_PAGE_SIZE, IMG_TRUE,
				       PDUMP_PT_UNIQUETAG, PDUMP_PT_UNIQUETAG);

			/* Publish the new page table in the PD entry/entries. */
			switch (pMMUHeap->psDevArena->DevMemHeapType) {
			case DEVICE_MEMORY_HEAP_SHARED:
			case DEVICE_MEMORY_HEAP_SHARED_EXPORTED:
				{
					/* Shared heaps: write the PD entry
					   into every MMU context. */
					struct MMU_CONTEXT *psMMUContext =
					    (struct MMU_CONTEXT *)pMMUHeap->
						    psMMUContext->psDevInfo->
							    pvMMUContextList;

					while (psMMUContext) {
						pui32PDEntry =
						    (u32 *)psMMUContext->
								pvPDCpuVAddr;
						pui32PDEntry += ui32PDIndex;

						pui32PDEntry[i] =
						    sDevPAddr.uiAddr |
							SGX_MMU_PDE_VALID;

						PDUMPPAGETABLE
						    ((void *)&pui32PDEntry[i],
						     sizeof(u32), IMG_FALSE,
						     PDUMP_PD_UNIQUETAG,
						     PDUMP_PT_UNIQUETAG);

						psMMUContext =
						    psMMUContext->psNext;
					}
					break;
				}
			case DEVICE_MEMORY_HEAP_PERCONTEXT:
			case DEVICE_MEMORY_HEAP_KERNEL:
				{
					/* Only the current context's PD. */
					pui32PDEntry[i] = sDevPAddr.uiAddr |
							     SGX_MMU_PDE_VALID;

					PDUMPPAGETABLE((void *)&pui32PDEntry[i],
						       sizeof(u32), IMG_FALSE,
						       PDUMP_PD_UNIQUETAG,
						       PDUMP_PT_UNIQUETAG);

					break;
				}
			default:
				{
					PVR_DPF(PVR_DBG_ERROR,
						"_DeferredAllocPagetables: "
						"ERROR invalid heap type");
					return IMG_FALSE;
				}
			}

			/* PD changed: invalidate the hardware's cached copy. */
			MMU_InvalidateDirectoryCache(pMMUHeap->psMMUContext->
						     psDevInfo);
		} else {
			/* Slot already backed: its PD entry must be valid. */
			PVR_ASSERT(pui32PDEntry[i] != 0);
		}
	}

	return IMG_TRUE;
}
/*
 * AllocMemory - allocate device-virtual space (and optionally RAM backing)
 * for a buffer.
 *
 * Two modes, selected by uFlags:
 *  - PVRSRV_MEM_RAM_BACKED_ALLOCATION: sub-allocate from the heap's import
 *    arena (RA_Alloc), which hands back the containing BM_MAPPING with
 *    physical backing already attached.
 *  - otherwise: allocate device-virtual space only via pfnMMUAlloc and
 *    create a fresh, unbacked BM_MAPPING.
 *
 * Returns IMG_TRUE on success, IMG_FALSE on failure.
 *
 * Fix: the OSAllocMem failure message used a "%x" conversion with no
 * matching argument, which is undefined behavior for printf-style
 * formatting; the allocation size is now passed.
 */
static IMG_BOOL AllocMemory(struct BM_CONTEXT *pBMContext,
		struct BM_HEAP *psBMHeap, struct IMG_DEV_VIRTADDR *psDevVAddr,
		size_t uSize, u32 uFlags, u32 uDevVAddrAlignment,
		struct BM_BUF *pBuf)
{
	struct BM_MAPPING *pMapping;
	u32 uOffset;
	struct RA_ARENA *pArena = NULL;

	PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory "
			"(pBMContext=%08X, uSize=0x%x, uFlags=0x%x, "
			"align=0x%x, pBuf=%08X)",
		 pBMContext, uSize, uFlags, uDevVAddrAlignment, pBuf);

	if (uFlags & PVRSRV_MEM_RAM_BACKED_ALLOCATION) {
		if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
			/* The arena allocator chooses the address, so a
			   user-supplied one cannot be honoured. */
			PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
				"combination of DevVAddr management and "
				"RAM backing mode unsupported");
			return IMG_FALSE;
		}

		/* The heap must use a backing-store type the import arena
		   can service. */
		if (psBMHeap->ui32Attribs &
		    (PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG |
		     PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG)) {
			pArena = psBMHeap->pImportArena;
		} else {
			PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
				"backing store type doesn't match heap");
			return IMG_FALSE;
		}

		/* RA_Alloc yields the device address plus (via the priv
		   pointer) the BM_MAPPING of the containing import. */
		if (!RA_Alloc(pArena, uSize, (void *)&pMapping, uFlags,
			      uDevVAddrAlignment,
			      (u32 *)&(pBuf->DevVAddr.uiAddr))) {
			PVR_DPF(PVR_DBG_ERROR,
				 "AllocMemory: RA_Alloc(0x%x) FAILED", uSize);
			return IMG_FALSE;
		}

		/* Buffer's offset within the containing import mapping. */
		uOffset = pBuf->DevVAddr.uiAddr - pMapping->DevVAddr.uiAddr;
		if (pMapping->CpuVAddr) {
			pBuf->CpuVAddr =
			    (void *)((u32) pMapping->CpuVAddr + uOffset);
		} else {
			pBuf->CpuVAddr = NULL;
		}

		/* Share the mapping's OS handle when the buffer spans the
		   whole import; otherwise take a sub-handle. */
		if (uSize == pMapping->uSize) {
			pBuf->hOSMemHandle = pMapping->hOSMemHandle;
		} else {
			if (OSGetSubMemHandle(pMapping->hOSMemHandle, uOffset,
					uSize, psBMHeap->ui32Attribs,
					&pBuf->hOSMemHandle) != PVRSRV_OK) {
				PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
						"OSGetSubMemHandle FAILED");
				return IMG_FALSE;
			}
		}

		pBuf->CpuPAddr.uiAddr = pMapping->CpuPAddr.uiAddr + uOffset;

		if (uFlags & PVRSRV_MEM_ZERO)
			if (!ZeroBuf(pBuf, pMapping, uSize,
				     psBMHeap->ui32Attribs | uFlags))
				return IMG_FALSE;
	} else {
		if (uFlags & PVRSRV_MEM_USER_SUPPLIED_DEVVADDR) {
			PVR_ASSERT(psDevVAddr != NULL);

			if (psDevVAddr == NULL) {
				PVR_DPF(PVR_DBG_ERROR, "AllocMemory: "
					"invalid parameter - psDevVAddr");
				return IMG_FALSE;
			}

			/* Reserve the caller-specified device range. */
			pBMContext->psDeviceNode->pfnMMUAlloc(
					      psBMHeap->pMMUHeap, uSize,
					      PVRSRV_MEM_USER_SUPPLIED_DEVVADDR,
					      uDevVAddrAlignment, psDevVAddr);
			pBuf->DevVAddr = *psDevVAddr;
		} else {
			/* Let the MMU heap pick the device address. */
			pBMContext->psDeviceNode->pfnMMUAlloc(psBMHeap->
							     pMMUHeap, uSize, 0,
							     uDevVAddrAlignment,
							     &pBuf->DevVAddr);
		}

		/* Fresh, unbacked mapping records the allocation. */
		if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP,
			       sizeof(struct BM_MAPPING),
			       (void **)&pMapping, NULL) != PVRSRV_OK) {
			/* Was missing the size argument for "%x". */
			PVR_DPF(PVR_DBG_ERROR,
				 "AllocMemory: OSAllocMem(0x%x) FAILED",
				 sizeof(struct BM_MAPPING));
			return IMG_FALSE;
		}

		/* No CPU-side presence until backing is attached later. */
		pBuf->CpuVAddr = NULL;
		pBuf->hOSMemHandle = NULL;
		pBuf->CpuPAddr.uiAddr = 0;

		pMapping->CpuVAddr = NULL;
		pMapping->CpuPAddr.uiAddr = 0;
		pMapping->DevVAddr = pBuf->DevVAddr;
		pMapping->psSysAddr = NULL;
		pMapping->uSize = uSize;
		pMapping->hOSMemHandle = NULL;
	}

	/* pArena stays NULL on the non-RAM-backed path. */
	pMapping->pArena = pArena;

	/* Link buffer <-> mapping <-> heap. */
	pMapping->pBMHeap = psBMHeap;
	pBuf->pMapping = pMapping;

	PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory: "
		"pMapping=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
		 pMapping, pMapping->DevVAddr.uiAddr, pMapping->CpuVAddr,
		 pMapping->CpuPAddr.uiAddr, pMapping->uSize);

	PVR_DPF(PVR_DBG_MESSAGE, "AllocMemory: "
		 "pBuf=%08X: DevV=%08X CpuV=%08X CpuP=%08X uSize=0x%x",
		 pBuf, pBuf->DevVAddr.uiAddr, pBuf->CpuVAddr,
		 pBuf->CpuPAddr.uiAddr, uSize);

	/* Result must honour the requested (power-of-two) alignment. */
	PVR_ASSERT(((pBuf->DevVAddr.uiAddr) & (uDevVAddrAlignment - 1)) == 0);

	return IMG_TRUE;
}
/*
 * BM_ImportMemory - import physical backing for a buffer-manager heap and
 * map it into the device. Used as the heap arena's import callback.
 *
 * pH            heap handle (a struct BM_HEAP *)
 * uRequestSize  requested size; rounded up to a host page
 * pActualSize   if non-NULL, receives the page-aligned size actually used
 * ppsMapping    receives the newly created BM_MAPPING on success
 * uFlags        PVRSRV_MEM_* / PVRSRV_HAP_* flags for the import
 * pBase         receives the device virtual base address on success
 *
 * Backing comes from non-contiguous system pages (OSAllocPages) or
 * contiguous local device memory (RA_Alloc + OSReservePhys), chosen by the
 * heap attributes. Returns IMG_TRUE on success, IMG_FALSE on failure, with
 * partially acquired resources released via the fail_* labels.
 */
static IMG_BOOL BM_ImportMemory(void *pH, size_t uRequestSize,
		size_t *pActualSize, struct BM_MAPPING **ppsMapping,
		u32 uFlags, u32 *pBase)
{
	struct BM_MAPPING *pMapping;
	struct BM_HEAP *pBMHeap = pH;
	struct BM_CONTEXT *pBMContext = pBMHeap->pBMContext;
	IMG_BOOL bResult;
	size_t uSize;
	size_t uPSize;
	u32 uDevVAddrAlignment = 0;

	PVR_DPF(PVR_DBG_MESSAGE,
		"BM_ImportMemory (pBMContext=%08X, uRequestSize=0x%x, "
		"uFlags=0x%x, uAlign=0x%x)",
		 pBMContext, uRequestSize, uFlags, uDevVAddrAlignment);

	PVR_ASSERT(ppsMapping != NULL);
	PVR_ASSERT(pBMContext != NULL);

	if (ppsMapping == NULL) {
		PVR_DPF(PVR_DBG_ERROR, "BM_ImportMemory: invalid parameter");
		goto fail_exit;
	}

	/* Imports are always whole host pages. */
	uSize = HOST_PAGEALIGN(uRequestSize);
	PVR_ASSERT(uSize >= uRequestSize);

	if (OSAllocMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING),
		       (void **)&pMapping, NULL) != PVRSRV_OK) {
		PVR_DPF(PVR_DBG_ERROR,
			 "BM_ImportMemory: failed struct BM_MAPPING alloc");
		goto fail_exit;
	}

	/* Initialise the mapping before acquiring backing so the unwind
	   path can rely on these fields. */
	pMapping->hOSMemHandle = NULL;
	pMapping->CpuVAddr = NULL;
	pMapping->DevVAddr.uiAddr = 0;
	pMapping->CpuPAddr.uiAddr = 0;
	pMapping->uSize = uSize;
	pMapping->pBMHeap = pBMHeap;
	pMapping->ui32Flags = uFlags;

	if (pActualSize)
		*pActualSize = uSize;

	/* A "dummy" allocation is backed by a single data page regardless
	   of the requested size. */
	if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
		uPSize = pBMHeap->sDevArena.ui32DataPageSize;
	else
		uPSize = pMapping->uSize;

	if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
		/* Non-contiguous system memory: allocate OS pages. */
		if (OSAllocPages(pBMHeap->ui32Attribs, uPSize,
				 pBMHeap->sDevArena.ui32DataPageSize,
				 (void **)&pMapping->CpuVAddr,
				 &pMapping->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				 "BM_ImportMemory: OSAllocPages(0x%x) failed",
				 uPSize);
			goto fail_mapping_alloc;
		}

		pMapping->eCpuMemoryOrigin = hm_env;
	} else if (pBMHeap->ui32Attribs & PVRSRV_BACKINGSTORE_LOCALMEM_CONTIG) {
		struct IMG_SYS_PHYADDR sSysPAddr;

		PVR_ASSERT(pBMHeap->pLocalDevMemArena != NULL);

		/* Contiguous local device memory: carve physical space from
		   the local arena, then reserve a CPU mapping for it. */
		if (!RA_Alloc(pBMHeap->pLocalDevMemArena, uPSize, NULL, 0,
			      pBMHeap->sDevArena.ui32DataPageSize,
			      (u32 *)&sSysPAddr.uiAddr)) {
			PVR_DPF(PVR_DBG_ERROR,
				 "BM_ImportMemory: RA_Alloc(0x%x) FAILED",
				 uPSize);
			goto fail_mapping_alloc;
		}

		pMapping->CpuPAddr = SysSysPAddrToCpuPAddr(sSysPAddr);
		if (OSReservePhys(pMapping->CpuPAddr, uPSize,
				  pBMHeap->ui32Attribs, &pMapping->CpuVAddr,
				  &pMapping->hOSMemHandle) != PVRSRV_OK) {
			PVR_DPF(PVR_DBG_ERROR,
				 "BM_ImportMemory: OSReservePhys failed");
			goto fail_dev_mem_alloc;
		}

		pMapping->eCpuMemoryOrigin = hm_contiguous;
	} else {
		PVR_DPF(PVR_DBG_ERROR,
			 "BM_ImportMemory: Invalid backing store type");
		goto fail_mapping_alloc;
	}

	/* Map the backing into the device's virtual address space. */
	bResult = DevMemoryAlloc(pBMContext, pMapping, uFlags,
				 uDevVAddrAlignment, &pMapping->DevVAddr);
	if (!bResult) {
		PVR_DPF(PVR_DBG_ERROR,
			 "BM_ImportMemory: DevMemoryAlloc(0x%x) failed",
			 pMapping->uSize);
		goto fail_dev_mem_alloc;
	}

	PVR_ASSERT(uDevVAddrAlignment > 1 ?
		   (pMapping->DevVAddr.uiAddr % uDevVAddrAlignment) == 0 : 1);

	*pBase = pMapping->DevVAddr.uiAddr;
	*ppsMapping = pMapping;

	PVR_DPF(PVR_DBG_MESSAGE, "BM_ImportMemory: IMG_TRUE");
	return IMG_TRUE;

fail_dev_mem_alloc:
	/* Release whichever backing store was acquired above. */
	if (pMapping->CpuVAddr || pMapping->hOSMemHandle) {
		/* DevMemoryAlloc may have doubled uSize for interleaved
		   allocations; undo that before freeing. */
		if (pMapping->ui32Flags & PVRSRV_MEM_INTERLEAVED)
			pMapping->uSize /= 2;

		if (pMapping->ui32Flags & PVRSRV_MEM_DUMMY)
			uPSize = pBMHeap->sDevArena.ui32DataPageSize;
		else
			uPSize = pMapping->uSize;

		if (pBMHeap->ui32Attribs &
			PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG) {
			OSFreePages(pBMHeap->ui32Attribs, uPSize,
				    (void *)pMapping->CpuVAddr,
				    pMapping->hOSMemHandle);
		} else {
			struct IMG_SYS_PHYADDR sSysPAddr;

			if (pMapping->CpuVAddr)
				OSUnReservePhys(pMapping->CpuVAddr, uPSize,
						pBMHeap->ui32Attribs,
						pMapping->hOSMemHandle);
			sSysPAddr = SysCpuPAddrToSysPAddr(pMapping->CpuPAddr);
			RA_Free(pBMHeap->pLocalDevMemArena, sSysPAddr.uiAddr,
				IMG_FALSE);
		}
	}
fail_mapping_alloc:
	OSFreeMem(PVRSRV_OS_PAGEABLE_HEAP, sizeof(struct BM_MAPPING), pMapping,
		  NULL);
fail_exit:
	return IMG_FALSE;
}