Example #1
0
static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
											 PVRSRV_DEVICE_NODE *psDeviceNode,
											 DEVMEM_MEMDESC *psFWMemContextMemDesc,
											 IMG_UINT32 ui32Priority,
											 RGX_COMMON_CONTEXT_INFO *psInfo,
											 RGX_SERVER_TQ_3D_DATA *ps3DData)
{
	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
	PVRSRV_ERROR eError;

	/*
		Allocate device memory for the firmware GPU context suspend state.
		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
	*/
	PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");

	eError = DevmemFwAllocate(psDevInfo,
							sizeof(RGXFWIF_3DCTX_STATE),
							RGX_FWCOMCTX_ALLOCFLAGS,
							"FirmwareTQ3DContext",
							&ps3DData->psFWContextStateMemDesc);
	if (eError != PVRSRV_OK)
	{
		goto fail_contextswitchstate;
	}

	eError = FWCommonContextAllocate(psConnection,
									 psDeviceNode,
									 "TQ_3D",
									 IMG_NULL,
									 0,
									 psFWMemContextMemDesc,
									 ps3DData->psFWContextStateMemDesc,
									 RGX_CCB_SIZE_LOG2,
									 ui32Priority,
									 psInfo,
									 &ps3DData->psServerCommonContext);
	if (eError != PVRSRV_OK)
	{
		goto fail_contextalloc;
	}


	PDUMPCOMMENT("Dump 3D context suspend state buffer");
	DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);

	ps3DData->ui32Priority = ui32Priority;
	return PVRSRV_OK;

fail_contextalloc:
	DevmemFwFree(ps3DData->psFWContextStateMemDesc);
fail_contextswitchstate:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
Example #2
0
/******************************************************************************
 FUNCTION	: RGXReleaseCCB

 PURPOSE	: Release a CCB that we have been writing to.

 PARAMETERS	: psClientCCB		- the client CCB
  			  ui32CmdSize		- size of the command that was written
  			  bPDumpContinuous	- was the command PDumped as continuous?

 RETURNS	: None
******************************************************************************/
IMG_INTERNAL IMG_VOID RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
									IMG_UINT32		ui32CmdSize,
									IMG_BOOL		bPDumpContinuous)
{
	IMG_UINT32	ui32PDumpFlags	= bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS : 0;
	IMG_BOOL	bInCaptureRange;
	IMG_BOOL	bPdumpEnabled;

	PDumpIsCaptureFrameKM(&bInCaptureRange);
	bPdumpEnabled = (bInCaptureRange || bPDumpContinuous);

	/* Dump the CCB data */
	if (bPdumpEnabled)
	{
		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
						   psClientCCB->ui32HostWriteOffset,
						   ui32CmdSize,
						   ui32PDumpFlags);
	}
	/*
	 * Update the CCB write offset.
	 */
	UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
					  ui32CmdSize,
					  psClientCCB->ui32Size);

	/*
		PDumpSetFrame will detect the transition out of capture range for
		frame-based data, but if we are PDumping continuous data then we
		need to inform the PDump layer ourselves.
	*/
	if (bPDumpContinuous && !bInCaptureRange)
	{
		PVRSRV_ERROR eError;

		/* Only Transitioning into capture range can cause an error */
		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_FALSE, IMG_TRUE);
		PVR_ASSERT(eError == PVRSRV_OK);
	}

	if (bPdumpEnabled)
	{
		/* Update the PDump write offset to show we PDumped this command */
		psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
	}

#if defined(NO_HARDWARE)
	/*
		The firmware is not running, so it cannot update these; we do it here instead.
	*/
	psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
	psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
#endif
}
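
/*
 * Editor's sketch (not part of the original source). UPDATE_CCB_OFFSET is not
 * defined in this excerpt; the behaviour assumed below is a wrap-around
 * advance over a power-of-two sized circular buffer, consistent with the wrap
 * mask set up in RGXCreateCCB (ui32WrapMask = ui32AllocSize - 1). All names
 * here are illustrative, not driver API.
 */
#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for UPDATE_CCB_OFFSET: advance the offset and wrap. */
static inline uint32_t ccb_advance(uint32_t offset, uint32_t cmd_size, uint32_t ccb_size)
{
	/* ccb_size must be a power of two for the mask to be a valid wrap. */
	assert((ccb_size & (ccb_size - 1)) == 0);
	return (offset + cmd_size) & (ccb_size - 1);
}

/* Worked example: a 1 KiB CCB wrapping back to the start. */
static void ccb_advance_example(void)
{
	uint32_t off = 1008;
	off = ccb_advance(off, 32, 1024);	/* 1008 + 32 = 1040, wraps to 16 */
	assert(off == 16);
}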
Example #3
0
static
IMG_BOOL _PDumpSyncBlock(PDLLIST_NODE psNode, IMG_PVOID pvCallbackData)
{
	SYNC_PRIMITIVE_BLOCK *psSyncBlock = IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
	PVR_UNREFERENCED_PARAMETER(pvCallbackData);

	DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
					   0,
					   psSyncBlock->ui32BlockSize,
					   PDUMP_FLAGS_CONTINUOUS);
	return IMG_TRUE;
}
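
/*
 * Editor's sketch (assumption, not from the original source). _PDumpSyncBlock
 * follows the intrusive-list callback pattern: IMG_CONTAINER_OF recovers the
 * enclosing SYNC_PRIMITIVE_BLOCK from its embedded list node, and returning
 * IMG_TRUE asks the list walker to keep iterating. A minimal stand-alone
 * version of that pattern, with illustrative names:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };

struct sync_block {
	unsigned block_size;
	struct list_node node;		/* embedded list linkage */
};

/* Step back from the embedded node to the enclosing structure. */
#define CONTAINER_OF(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Per-node callback; returning true means "continue walking the list". */
static bool dump_block(struct list_node *node, void *unused)
{
	struct sync_block *blk = CONTAINER_OF(node, struct sync_block, node);
	(void)unused;
	printf("dump %u bytes\n", blk->block_size);
	return true;
}

static void walk_list(struct list_node *head,
					  bool (*cb)(struct list_node *, void *), void *data)
{
	for (struct list_node *n = head; n != NULL; n = n->next)
	{
		if (!cb(n, data))
			break;
	}
}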
Example #4
0
PVRSRV_ERROR
PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
{
	/*
		We might be asked to PDump sync state outside of capture range
		(e.g. texture uploads), so make this continuous.
	*/
	DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
					   ui32Offset,
					   sizeof(IMG_UINT32),
					   PDUMP_FLAGS_CONTINUOUS);

	return PVRSRV_OK;
}
Example #5
0
static PVRSRV_ERROR _RGXCCBPDumpTransition(IMG_PVOID *pvData, IMG_BOOL bInto, IMG_BOOL bContinuous)
{
	RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
	
	IMG_UINT32 ui32PDumpFlags = bContinuous ? PDUMP_FLAGS_CONTINUOUS:0;

	/*
		We're about to transition into capture range and may have submitted
		new commands since the last time we entered capture range, so drain
		the CCB as required.
	*/
	if (bInto)
	{
		volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
		PVRSRV_ERROR eError;

		/*
			Wait for the FW to catch up (the retry will be pushed back out to
			the Services client, where we wait on the event object and try
			again later).
		*/
		if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
		{
			return PVRSRV_ERROR_RETRY;
		}

		/*
			We drain whenever capture range is entered. Even if no commands
			have been issued while we were out of capture range, we have to
			wait for operations that we might have issued in the last capture
			range to finish, so that the sync prim update which happens after
			all the PDumpTransition callbacks have been called doesn't clobber
			syncs which the FW is currently working on.
			Although this is suboptimal (while out of capture range we
			serialise the PDump script processing and the FW for every
			persistent operation), there is no easy solution: not all modules
			that work on syncs register a PDumpTransition, so we have no way
			of knowing whether we can skip the drain and the sync prim dump.
		*/
		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
							  "cCCB(%s@%p): Draining rgxfw_roff == woff (%d)",
							  psClientCCB->szName,
							  psClientCCB,
							  psClientCCB->ui32LastPDumpWriteOffset);

		eError = DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
										offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
										psClientCCB->ui32LastPDumpWriteOffset,
										0xffffffff,
										PDUMP_POLL_OPERATOR_EQUAL,
										ui32PDumpFlags);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_WARNING, "_RGXCCBPDumpTransition: problem pdumping POL for cCCBCtl (%d)", eError));
		}
		PVR_ASSERT(eError == PVRSRV_OK);

		/*
			If new command(s) have been written out of capture range then we
			need to fast-forward past the uncaptured operations.
		*/
		if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset)
		{
			/*
				There are commands that were not captured, so after the
				simulation drain (above) we also need to fast-forward past
				those commands so the FW can start with the first command
				in the new capture range.
			 */
			psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
			psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
			psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
	
			PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
								  "cCCB(%s@%p): Fast-forward from %d to %d",
								  psClientCCB->szName,
								  psClientCCB,
								  psClientCCB->ui32LastPDumpWriteOffset,
								  psClientCCB->ui32HostWriteOffset);
	
			DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
							   0,
							   sizeof(RGXFWIF_CCCB_CTL),
							   ui32PDumpFlags);
							   
			/*
				Although we've entered capture range we might not do any work
				on this CCB, so update ui32LastPDumpWriteOffset to reflect
				where we got to; the next drain then starts from this point
				(a simplified model of this fast-forward follows the function).
			*/
			psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
		}
	}
	return PVRSRV_OK;
}
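
/*
 * Editor's sketch (illustration only, not driver code). The drain/fast-forward
 * above is driven by two host-side offsets: ui32LastPDumpWriteOffset (how far
 * the captured script got) and ui32HostWriteOffset (how far the host actually
 * wrote). A simplified model of the fast-forward decision, with made-up names:
 */
#include <stdbool.h>
#include <stdint.h>

struct ccb_model {
	uint32_t host_write_off;		/* where the host has written up to     */
	uint32_t last_pdump_write_off;	/* where the captured script has got to */
	uint32_t read_off, dep_off, write_off;	/* FW-visible control offsets   */
};

/* Returns true if uncaptured commands had to be skipped in the capture. */
static bool pdump_fast_forward(struct ccb_model *ccb)
{
	if (ccb->last_pdump_write_off == ccb->host_write_off)
	{
		return false;	/* nothing was written while out of capture range */
	}

	/* Jump the simulated FW past the commands the capture never saw. */
	ccb->read_off  = ccb->host_write_off;
	ccb->dep_off   = ccb->host_write_off;
	ccb->write_off = ccb->host_write_off;
	ccb->last_pdump_write_off = ccb->host_write_off;
	return true;
}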
Example #6
0
/******************************************************************************
 FUNCTION	: RGXReleaseCCB

 PURPOSE	: Release a CCB that we have been writing to.

 PARAMETERS	: psClientCCB		- the client CCB
  			  ui32CmdSize		- size of the command that was written
  			  bPDumpContinuous	- was the command PDumped as continuous?

 RETURNS	: None
******************************************************************************/
IMG_INTERNAL IMG_VOID RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
									IMG_UINT32		ui32CmdSize,
									IMG_BOOL		bPDumpContinuous)
{
	IMG_UINT32	ui32PDumpFlags	= bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS : 0;
	IMG_BOOL	bInCaptureRange;
	IMG_BOOL	bPdumpEnabled;

	PDumpIsCaptureFrameKM(&bInCaptureRange);
	bPdumpEnabled = (bInCaptureRange || bPDumpContinuous);

	/* Dump the CCB data */
	if (bPdumpEnabled)
	{
		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
						   psClientCCB->ui32HostWriteOffset,
						   ui32CmdSize,
						   ui32PDumpFlags);
	}
	
	/*
	 *  Check if there have been any fences written that will already be
	 *  satisfied by a previously written update in this CCB.
	 */
#if defined REDUNDANT_SYNCS_DEBUG
	{
		IMG_UINT8  *pui8BufferStart = (IMG_PVOID)((IMG_UINTPTR_T)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
		IMG_UINT8  *pui8BufferEnd   = (IMG_PVOID)((IMG_UINTPTR_T)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset + ui32CmdSize);

		/* Walk through the commands in this section of CCB being released... */
		while (pui8BufferStart < pui8BufferEnd)
		{
			RGXFWIF_CCB_CMD_HEADER  *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8BufferStart;

			if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
			{
				/* If an UPDATE then record the value in case a later fence depends on it. */
				IMG_UINT32  ui32NumUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
				IMG_UINT32  i;

				for (i = 0;  i < ui32NumUpdates;  i++)
				{
					RGXFWIF_UFO  *psUFOPtr = ((RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER))) + i;
					
					psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateWriteIndex++] = *psUFOPtr;
					psClientCCB->ui32UpdateWriteIndex &= (RGX_CCCB_FENCE_UPDATE_LIST_SIZE-1);
				}
			}
			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
			{
				IMG_UINT32  ui32NumFences = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
				IMG_UINT32  i;
				
				for (i = 0;  i < ui32NumFences;  i++)
				{
					RGXFWIF_UFO  *psUFOPtr = ((RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER))) + i;
					IMG_UINT32  ui32UpdateIndex;

					/* Check recently queued updates to see if this fence will be satisfied by the time it is checked. */
					for (ui32UpdateIndex = 0;  ui32UpdateIndex < RGX_CCCB_FENCE_UPDATE_LIST_SIZE;  ui32UpdateIndex++)
					{
						RGXFWIF_UFO  *psUpdatePtr = &psClientCCB->asFenceUpdateList[ui32UpdateIndex];
							
						if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
							psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
						{
							PVR_DPF((PVR_DBG_WARNING, "Redundant fence found in cCCB(%p) - 0x%x -> 0x%x",
									psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
							//psUFOPtr->puiAddrUFO.ui32Addr = 0;
							break;
						}
					}
				}
			}
			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)
			{
				IMG_UINT32  ui32NumFences = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
				IMG_UINT32  i;
				
				for (i = 0;  i < ui32NumFences;  i++)
				{
					RGXFWIF_UFO  *psUFOPtr = ((RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER))) + i;
					IMG_UINT32  ui32UpdateIndex;
							
					/* Check recently queued updates to see if this fence will be satisfied by the time it is checked. */
					for (ui32UpdateIndex = 0;  ui32UpdateIndex < RGX_CCCB_FENCE_UPDATE_LIST_SIZE;  ui32UpdateIndex++)
					{
						RGXFWIF_UFO  *psUpdatePtr = &psClientCCB->asFenceUpdateList[ui32UpdateIndex];
						
						/*
						 *  The PR-fence will be met if the update value is >= the required
						 *  fence value, i.e. the wrapping difference between the update value
						 *  and the fence value is non-negative (bit 31 of the unsigned
						 *  difference is clear). See the stand-alone check after this function.
						 */
						if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr  &&
							((psUpdatePtr->ui32Value - psUFOPtr->ui32Value) & (1U << 31)) == 0)
						{
							PVR_DPF((PVR_DBG_WARNING, "Redundant PR fence found in cCCB(%p) - 0x%x -> 0x%x",
									psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
							//psUFOPtr->puiAddrUFO.ui32Addr = 0;
							break;
						}
					}
				}
			}

			/* Move to the next command in this section of CCB being released... */
			pui8BufferStart += sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize;
		}
	}
#endif /* REDUNDANT_SYNCS_DEBUG */

	/*
	 * Update the CCB write offset.
	 */
	UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
					  ui32CmdSize,
					  psClientCCB->ui32Size);

	/*
		PDumpSetFrame will detect the transition out of capture range for
		frame-based data, but if we are PDumping continuous data then we
		need to inform the PDump layer ourselves.
	*/
	if (bPDumpContinuous && !bInCaptureRange)
	{
		PVRSRV_ERROR eError;

		/* Only Transitioning into capture range can cause an error */
		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_FALSE, IMG_TRUE);
		PVR_ASSERT(eError == PVRSRV_OK);
	}

	if (bPdumpEnabled)
	{
		/* Update the PDump write offset to show we PDumped this command */
		psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
	}

#if defined(NO_HARDWARE)
	/*
		The firmware is not running, so it cannot update these; we do it here instead.
	*/
	psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
	psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
#endif
}
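
/*
 * Editor's sketch: the PR-fence test above relies on 32-bit wrap-around
 * arithmetic - the fence is considered satisfied when (update - fence) is
 * non-negative as a signed 32-bit value, i.e. bit 31 of the unsigned
 * difference is clear. A stand-alone version with a few spot checks:
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool pr_fence_satisfied(uint32_t update_value, uint32_t fence_value)
{
	return ((update_value - fence_value) & (1U << 31)) == 0;
}

static void pr_fence_examples(void)
{
	assert(pr_fence_satisfied(5, 3));			/* update ahead of the fence  */
	assert(!pr_fence_satisfied(3, 5));			/* update behind the fence    */
	assert(pr_fence_satisfied(2, 0xFFFFFFFEu));	/* still correct across wrap  */
}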
Example #7
0
/******************************************************************************
 FUNCTION	: RGXAcquireCCB

 PURPOSE	: Obtains access to write some commands to a CCB

 PARAMETERS	: psClientCCB		- The client CCB
			  ui32CmdSize		- How much space is required
			  ppvBufferSpace	- Pointer to space in the buffer
			  bPDumpContinuous  - Should this be PDump continuous?

 RETURNS	: PVRSRV_ERROR
******************************************************************************/
IMG_INTERNAL PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
										IMG_UINT32		ui32CmdSize,
										IMG_PVOID		*ppvBufferSpace,
										IMG_BOOL		bPDumpContinuous)
{
	PVRSRV_ERROR eError;
	IMG_UINT32	ui32PDumpFlags	= bPDumpContinuous ? PDUMP_FLAGS_CONTINUOUS : 0;
	IMG_BOOL	bInCaptureRange;
	IMG_BOOL	bPdumpEnabled;

	PDumpIsCaptureFrameKM(&bInCaptureRange);
	bPdumpEnabled = (bInCaptureRange || bPDumpContinuous);

	/*
		PDumpSetFrame will detect the transition into capture range for
		frame-based data, but if we are PDumping continuous data then we
		need to inform the PDump layer ourselves.
	*/
	if (bPDumpContinuous && !bInCaptureRange)
	{
		eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_TRUE, IMG_TRUE);
		if (eError != PVRSRV_OK)
		{
			return eError;
		}
	}

	/* Check that the CCB can hold this command + padding */
	if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
	{
		PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)\n",
								ui32CmdSize, psClientCCB->ui32Size));
		return PVRSRV_ERROR_CMD_TOO_BIG;
	}

	/*
		Check we don't overflow the end of the buffer and make sure we have
		enough for the padding command.
	*/
	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) >
		psClientCCB->ui32Size)
	{
		RGXFWIF_CCB_CMD_HEADER *psHeader;
		IMG_VOID *pvHeader;
		PVRSRV_ERROR eError;
		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;

		/* We're at the end of the buffer without enough contiguous space */
		eError = _RGXAcquireCCB(psClientCCB,
								ui32Remain,
								&pvHeader);
		if (eError != PVRSRV_OK)
		{
			/*
				It's possible that no commands have been processed, in which
				case the padding allocation can fail (we never allow the
				client CCB to become completely full).
			*/
			return eError;
		}
		psHeader = pvHeader;
		psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
		psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);

		PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
		if (bPdumpEnabled)
		{
			DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
							   psClientCCB->ui32HostWriteOffset,
							   ui32Remain,
							   ui32PDumpFlags);
		}
				
		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
						  ui32Remain,
						  psClientCCB->ui32Size);
	}

	return _RGXAcquireCCB(psClientCCB,
						  ui32CmdSize,
						  ppvBufferSpace);
}
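
/*
 * Editor's sketch: the space checks in RGXAcquireCCB reduce to two questions -
 * can the CCB ever hold this command at all, and does it fit before the end of
 * the buffer without being split (otherwise a padding command fills the tail
 * and the write offset wraps to zero). Simplified model; the names and the
 * padding size below are illustrative, not the driver's definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define PADDING_CMD_SIZE 16u	/* assumed size of the padding command header */

/* Size check: command + padding (+1 so the CCB is never completely full). */
static bool ccb_cmd_fits(uint32_t cmd_size, uint32_t ccb_size)
{
	return (cmd_size + PADDING_CMD_SIZE + 1) <= ccb_size;
}

/* Tail check: if the command would run past the end, pad out the remainder. */
static bool ccb_needs_padding(uint32_t write_off, uint32_t cmd_size,
							  uint32_t ccb_size, uint32_t *pad_size)
{
	if (write_off + cmd_size + PADDING_CMD_SIZE > ccb_size)
	{
		*pad_size = ccb_size - write_off;	/* padding spans to the end of the buffer */
		return true;
	}
	return false;
}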
Example #8
0
PVRSRV_ERROR RGXCreateCCB(PVRSRV_DEVICE_NODE	*psDeviceNode,
						  IMG_UINT32			ui32CCBSizeLog2,
						  CONNECTION_DATA		*psConnectionData,
						  const IMG_CHAR		*pszName,
						  RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
						  RGX_CLIENT_CCB		**ppsClientCCB,
						  DEVMEM_MEMDESC 		**ppsClientCCBMemDesc,
						  DEVMEM_MEMDESC 		**ppsClientCCBCtrlMemDesc)
{
	PVRSRV_ERROR	eError;
	DEVMEM_FLAGS_T	uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
	IMG_UINT32		ui32AllocSize = (1U << ui32CCBSizeLog2);
	RGX_CLIENT_CCB	*psClientCCB;

	psClientCCB = OSAllocMem(sizeof(*psClientCCB));
	if (psClientCCB == IMG_NULL)
	{
		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
		goto fail_alloc;
	}
	psClientCCB->psServerCommonContext = psServerCommonContext;

	uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
	                            PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
								PVRSRV_MEMALLOCFLAG_CPU_READABLE |
								PVRSRV_MEMALLOCFLAG_UNCACHED |
								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

	uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
								PVRSRV_MEMALLOCFLAG_GPU_READABLE |
								PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
								PVRSRV_MEMALLOCFLAG_CPU_READABLE |
								PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
								PVRSRV_MEMALLOCFLAG_UNCACHED |
								PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
								PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

	PDUMPCOMMENT("Allocate RGXFW cCCB");
	eError = DevmemFwAllocateExportable(psDeviceNode,
										ui32AllocSize,
										uiClientCCBMemAllocFlags,
										"FirmwareClientCCB",
										&psClientCCB->psClientCCBMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
				PVRSRVGetErrorStringKM(eError)));
		goto fail_alloc_ccb;
	}

	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
									  (IMG_VOID **) &psClientCCB->pui8ClientCCB);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
				PVRSRVGetErrorStringKM(eError)));
		goto fail_map_ccb;
	}

	PDUMPCOMMENT("Allocate RGXFW cCCB control");
	eError = DevmemFwAllocateExportable(psDeviceNode,
										sizeof(RGXFWIF_CCCB_CTL),
										uiClientCCBCtlMemAllocFlags,
										"FirmwareClientCCBControl",
										&psClientCCB->psClientCCBCtrlMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
				PVRSRVGetErrorStringKM(eError)));
		goto fail_alloc_ccbctrl;
	}

	eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
									  (IMG_VOID **) &psClientCCB->psClientCCBCtrl);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
				PVRSRVGetErrorStringKM(eError)));
		goto fail_map_ccbctrl;
	}

	psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
	psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
	psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
	psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
	OSStringCopy(psClientCCB->szName, pszName);

	PDUMPCOMMENT("cCCB control");
	DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
					   0,
					   sizeof(RGXFWIF_CCCB_CTL),
					   PDUMP_FLAGS_CONTINUOUS);
	PVR_ASSERT(eError == PVRSRV_OK);

	psClientCCB->ui32HostWriteOffset = 0;
	psClientCCB->ui32LastPDumpWriteOffset = 0;
	psClientCCB->ui32Size = ui32AllocSize;

#if defined REDUNDANT_SYNCS_DEBUG
	psClientCCB->ui32UpdateWriteIndex = 0;
	OSMemSet(psClientCCB->asFenceUpdateList, 0, sizeof(psClientCCB->asFenceUpdateList));
#endif

	eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
											  _RGXCCBPDumpTransition,
											  psClientCCB,
											  &psClientCCB->hTransition);
	if (eError != PVRSRV_OK)
	{
		goto fail_pdumpreg;
	}
	/*
		Note:
		Due to resman the connection structure could be freed before the client
		CCB, so rather than saving off the connection structure we save the
		PDump-specific memory, which is refcounted to ensure it isn't freed
		too early.
	*/
	psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
	PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
				 psClientCCB->szName,
				 psClientCCB);

	*ppsClientCCB = psClientCCB;
	*ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
	*ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
	return PVRSRV_OK;

fail_pdumpreg:
	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
fail_map_ccbctrl:
	DevmemFwFree(psClientCCB->psClientCCBCtrlMemDesc);
fail_alloc_ccbctrl:
	DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
fail_map_ccb:
	DevmemFwFree(psClientCCB->psClientCCBMemDesc);
fail_alloc_ccb:
	OSFreeMem(psClientCCB);
fail_alloc:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
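
/*
 * Editor's sketch: RGXCreateCCB (and the other create paths in these examples)
 * use the goto unwind ladder - failure labels appear in reverse order of
 * acquisition, so each label releases exactly the resources obtained before
 * the failing step. The shape of the pattern, with placeholder names:
 */
#include <stdlib.h>

struct thing { void *a; void *b; };

static int create_thing(struct thing **out)
{
	struct thing *t = malloc(sizeof(*t));
	if (t == NULL)
		goto fail_alloc;

	t->a = malloc(64);
	if (t->a == NULL)
		goto fail_a;

	t->b = malloc(64);
	if (t->b == NULL)
		goto fail_b;

	*out = t;
	return 0;

fail_b:			/* unwind in reverse order of acquisition */
	free(t->a);
fail_a:
	free(t);
fail_alloc:
	return -1;
}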
Example #9
0
/*
 * RGXRegisterMemoryContext
 */ 
PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE	*psDeviceNode,
									  MMU_CONTEXT			*psMMUContext,
									  IMG_HANDLE			*hPrivData)
{
	PVRSRV_ERROR			eError;
	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;
	DEVMEM_FLAGS_T			uiFWMemContextMemAllocFlags;
	RGXFWIF_FWMEMCONTEXT	*psFWMemContext;
	DEVMEM_MEMDESC			*psFWMemContextMemDesc;
	SERVER_MMU_CONTEXT *psServerMMUContext;

	if (psDevInfo->psKernelMMUCtx == IMG_NULL)
	{
		/*
		 * This must be the creation of the Kernel memory context. Take a copy
		 * of the MMU context for use when programming the BIF.
		 */ 
		psDevInfo->psKernelMMUCtx = psMMUContext;
	}
	else
	{
		psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
		if (psServerMMUContext == IMG_NULL)
		{
			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
			goto fail_alloc_server_ctx;
		}

		psServerMMUContext->psDevInfo = psDevInfo;

		/*
		 * This FW MemContext is only mapped into the kernel for initialisation purposes.
		 * Otherwise this allocation is only used by the FW.
		 * Therefore the GPU cache doesn't need coherency, and write-combine is
		 * sufficient on the CPU side (the WC buffer will be flushed at any kick).
		 */
		uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
										PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(META_CACHED) |
										PVRSRV_MEMALLOCFLAG_GPU_READABLE |
										PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
										PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
										PVRSRV_MEMALLOCFLAG_CPU_READABLE |
										PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
										PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
										PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;

		/*
			Allocate device memory for the firmware memory context for the new
			application.
		*/
		PDUMPCOMMENT("Allocate RGX firmware memory context");
		/* FIXME: why cache-consistent? */
		eError = DevmemFwAllocate(psDevInfo,
								sizeof(*psFWMemContext),
								uiFWMemContextMemAllocFlags,
								"FirmwareMemoryContext",
								&psFWMemContextMemDesc);

		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
					eError));
			goto fail_alloc_fw_ctx;
		}
		
		/*
			Temporarily map the firmware memory context to the kernel.
		*/
		eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
										  (IMG_VOID **)&psFWMemContext);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
					eError));
			goto fail_acquire_cpu_addr;
		}
		
		/*
		 * Write the new memory context's page catalogue into the firmware memory
		 * context for the client.
		 */
		eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
		if (eError != PVRSRV_OK)
		{
			PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
					eError));
			DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
			goto fail_acquire_base_addr;
		}

		/*
		 * Set default values for the rest of the structure.
		 */
		psFWMemContext->uiPageCatBaseRegID = -1;
		psFWMemContext->uiBreakpointAddr = 0;
		psFWMemContext->uiBPHandlerAddr = 0;
		psFWMemContext->uiBreakpointCtl = 0;

#if defined(SUPPORT_GPUVIRT_VALIDATION)
{
		IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;

		MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg);

		psFWMemContext->ui32OSid = ui32OSidReg;
}
#endif

#if defined(PDUMP)
		{
			IMG_CHAR			aszName[PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT];
			IMG_DEVMEM_OFFSET_T uiOffset = 0;

			/*
			 * Dump the Mem context allocation
			 */
			DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
			

			/*
			 * Obtain a symbolic addr of the mem context structure
			 */
			eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, 
												   &uiOffset, 
												   aszName, 
												   PMR_MAX_MEMSPNAME_SYMB_ADDR_LENGTH_DEFAULT);

			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
						eError));
				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
				goto fail_pdump_cat_base_addr;
			}

			/*
			 * Dump the Page Cat tag in the mem context (symbolic address)
			 */
			eError = MMU_PDumpWritePageCatBase(psMMUContext,
												aszName,
												uiOffset,
												8, /* 64-bit register write */
												0,
												0,
												0);
			if (eError != PVRSRV_OK)
			{
				PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
						eError));
				DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
				goto fail_pdump_cat_base;
			}
		}
#endif

		/*
		 * Release kernel address acquired above.
		 */
		DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);

		/*
		 * Store the process information for this device memory context
		 * for use with the host page-fault analysis.
		 */
		psServerMMUContext->uiPID = OSGetCurrentProcessID();
		psServerMMUContext->psMMUContext = psMMUContext;
		psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
		if (OSSNPrintf(psServerMMUContext->szProcessName,
						RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
						"%s",
						OSGetCurrentProcessName()) == RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
		{
			psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
		}

		OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
		dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
		OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);

		MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
		*hPrivData = psServerMMUContext;
	}
			
	return PVRSRV_OK;

#if defined(PDUMP)
fail_pdump_cat_base:
fail_pdump_cat_base_addr:
	MMU_ReleaseBaseAddr(IMG_NULL);
#endif
fail_acquire_base_addr:
	/* The CPU mapping was already released before jumping here, so there is nothing extra to release */
fail_acquire_cpu_addr:
	DevmemFwFree(psServerMMUContext->psFWMemContextMemDesc);
fail_alloc_fw_ctx:
	OSFreeMem(psServerMMUContext);
fail_alloc_server_ctx:
	PVR_ASSERT(eError != PVRSRV_OK);
	return eError;
}
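
/*
 * Editor's note (sketch using standard C snprintf rather than OSSNPrintf,
 * whose exact return convention is not shown in this excerpt). The process-
 * name copy above guards against truncation by re-terminating the buffer when
 * the formatted length reaches the buffer size; with standard snprintf the
 * equivalent check is ">= size", since the return value is the length that
 * would have been written:
 */
#include <stdio.h>

#define NAME_MAX_LEN 16

static void copy_process_name(char dst[NAME_MAX_LEN], const char *src)
{
	/* snprintf always NUL-terminates when size > 0, so the check below only
	 * detects whether the name was cut short. */
	if (snprintf(dst, NAME_MAX_LEN, "%s", src) >= NAME_MAX_LEN)
	{
		dst[NAME_MAX_LEN - 1] = '\0';	/* redundant with snprintf; kept to mirror the code above */
	}
}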
Example #10
0
IMG_EXPORT
PVRSRV_ERROR PVRSRVRGXCreateTQ2DContextKM(PVRSRV_DEVICE_NODE		*psDeviceNode,
										  DEVMEM_MEMDESC 			*psTQ2DCCBMemDesc,
										  DEVMEM_MEMDESC 			*psTQ2DCCBCtlMemDesc,
										  RGX_TQ2D_CLEANUP_DATA		**ppsCleanupData,
										  DEVMEM_MEMDESC 			**ppsFWTQ2DContextMemDesc,
										  IMG_UINT32				ui32Priority,
										  IMG_UINT32				ui32FrameworkRegisterSize,
										  IMG_PBYTE					pbyFrameworkRegisters,
										  IMG_HANDLE				hMemCtxPrivData)
{
	PVRSRV_ERROR			eError = PVRSRV_OK;
	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;	
	RGXFWIF_FWCOMMONCONTEXT	*psFWTQ2DContext;
	RGX_TQ2D_CLEANUP_DATA	*psTmpCleanup;
	DEVMEM_MEMDESC 			*psFWFrameworkMemDesc;

	/* Prepare cleanup struct */
	psTmpCleanup = OSAllocMem(sizeof(*psTmpCleanup));
	if (psTmpCleanup == IMG_NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}

	OSMemSet(psTmpCleanup, 0, sizeof(*psTmpCleanup));
	*ppsCleanupData = psTmpCleanup;

	/* Allocate cleanup sync */
	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
						   &psTmpCleanup->psCleanupSync);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate cleanup sync (0x%x)",
				eError));
		goto fail_syncalloc;
	}

	/*
		Allocate device memory for the firmware TQ 2D context.
	*/
	PDUMPCOMMENT("Allocate RGX firmware TQ 2D context");
	
	eError = DevmemFwAllocate(psDevInfo,
							sizeof(*psFWTQ2DContext),
							RGX_FWCOMCTX_ALLOCFLAGS,
							ppsFWTQ2DContextMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ2DContextKM: Failed to allocate firmware TQ 2D context (%u)",
				eError));
		goto fail_contextalloc;
	}
	psTmpCleanup->psFWTQ2DContextMemDesc = *ppsFWTQ2DContextMemDesc;
	psTmpCleanup->psDeviceNode = psDeviceNode;

	/*
		Temporarily map the firmware TQ 2D context to the kernel.
	*/
	eError = DevmemAcquireCpuVirtAddr(*ppsFWTQ2DContextMemDesc,
                                      (IMG_VOID **)&psFWTQ2DContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ2DContextKM: Failed to map firmware TQ 2D context (%u)",
				eError));
		goto fail_cpuvirtacquire;
	}

	/* 
	 * Create the FW framework buffer
	 */
	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, & psFWFrameworkMemDesc, ui32FrameworkRegisterSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ2DContextKM: Failed to allocate firmware GPU framework state (%u)",
				eError));
		goto fail_frameworkcreate;
	}
	
	psTmpCleanup->psFWFrameworkMemDesc = psFWFrameworkMemDesc;

	/* Copy the Framework client data into the framework buffer */
	eError = PVRSRVRGXFrameworkCopyRegisters(psFWFrameworkMemDesc, pbyFrameworkRegisters, ui32FrameworkRegisterSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ2DContextKM: Failed to populate the framework buffer (%u)",
				eError));
		goto fail_frameworkcopy;
	}

	eError = RGXInitFWCommonContext(psFWTQ2DContext,
									psTQ2DCCBMemDesc,
									psTQ2DCCBCtlMemDesc,
									hMemCtxPrivData,
									psFWFrameworkMemDesc,
									ui32Priority,
									IMG_NULL,
									& psTmpCleanup->sFWComContextCleanup);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ2DContextKM: Failed to init firmware common context (%u)",
				eError));
		goto fail_contextinit;
	}

	/*
	 * Dump the TQ2D and the memory contexts
	 */
	PDUMPCOMMENT("Dump FWTQ2DContext");
	DevmemPDumpLoadMem(*ppsFWTQ2DContextMemDesc, 0, sizeof(*psFWTQ2DContext), PDUMP_FLAGS_CONTINUOUS);

	/* Release address acquired above. */
	DevmemReleaseCpuVirtAddr(*ppsFWTQ2DContextMemDesc);

	return PVRSRV_OK;
fail_contextinit:
fail_frameworkcopy:
	DevmemFwFree(psFWFrameworkMemDesc);
fail_frameworkcreate:
	DevmemReleaseCpuVirtAddr(*ppsFWTQ2DContextMemDesc);
fail_cpuvirtacquire:
	DevmemFwFree(*ppsFWTQ2DContextMemDesc);
fail_contextalloc:
	SyncPrimFree(psTmpCleanup->psCleanupSync);
fail_syncalloc:
	OSFreeMem(psTmpCleanup);
	return eError;
}
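
/*
 * Editor's sketch: the TQ context creation above follows a "map temporarily,
 * initialise, release" pattern for firmware-visible structures - the CPU
 * mapping exists only long enough to fill in the structure, after which the
 * FW is the sole user. Generic shape with stand-ins for
 * DevmemAcquireCpuVirtAddr / DevmemReleaseCpuVirtAddr; all names illustrative.
 */
#include <stddef.h>
#include <string.h>

struct memdesc { void *backing; size_t size; };

static int  mem_map(struct memdesc *md, void **cpu_addr) { *cpu_addr = md->backing; return 0; }
static void mem_unmap(struct memdesc *md) { (void)md; }

/* Map, initialise the FW-visible structure, then drop the CPU mapping. */
static int init_fw_struct(struct memdesc *md)
{
	void *cpu_addr;
	int err = mem_map(md, &cpu_addr);	/* temporary CPU mapping */
	if (err != 0)
	{
		return err;
	}

	memset(cpu_addr, 0, md->size);		/* ...fill in fields through cpu_addr... */

	mem_unmap(md);						/* from here on, only the FW touches it */
	return 0;
}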
Example #11
0
/*
 * PVRSRVRGXCreateTQ3DContextKM
 */
IMG_EXPORT
PVRSRV_ERROR PVRSRVRGXCreateTQ3DContextKM(PVRSRV_DEVICE_NODE		*psDeviceNode,
										  DEVMEM_MEMDESC 			*psTQ3DCCBMemDesc,
										  DEVMEM_MEMDESC 			*psTQ3DCCBCtlMemDesc,
										  RGX_TQ3D_CLEANUP_DATA		**ppsCleanupData,
										  DEVMEM_MEMDESC 			**ppsFWTQ3DContextMemDesc,
										  DEVMEM_MEMDESC 			**ppsFWTQ3DContextStateMemDesc,
										  IMG_UINT32				ui32Priority,
										  IMG_DEV_VIRTADDR			sMCUFenceAddr,
										  IMG_UINT32				ui32FrameworkRegisterSize,
										  IMG_PBYTE					pbyFrameworkRegisters,
										  IMG_HANDLE				hMemCtxPrivData)
{
	PVRSRV_ERROR			eError = PVRSRV_OK;
	PVRSRV_RGXDEV_INFO 		*psDevInfo = psDeviceNode->pvDevice;	
	RGXFWIF_FWCOMMONCONTEXT	*psFWTQ3DContext;
	RGX_TQ3D_CLEANUP_DATA	*psTmpCleanup;
	DEVMEM_MEMDESC 			*psFWFrameworkMemDesc;

	/* Prepare cleanup struct */
	psTmpCleanup = OSAllocMem(sizeof(*psTmpCleanup));
	if (psTmpCleanup == IMG_NULL)
	{
		return PVRSRV_ERROR_OUT_OF_MEMORY;
	}


	OSMemSet(psTmpCleanup, 0, sizeof(*psTmpCleanup));
	*ppsCleanupData = psTmpCleanup;

	/* Allocate cleanup sync */
	eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
						   &psTmpCleanup->psCleanupSync);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate cleanup sync (0x%x)",
				eError));
		goto fail_syncalloc;
	}

	/*
		Allocate device memory for the firmware TQ 3D context.
	*/
	PDUMPCOMMENT("Allocate RGX firmware TQ 3D context");
	
	eError = DevmemFwAllocate(psDevInfo,
							sizeof(*psFWTQ3DContext),
							RGX_FWCOMCTX_ALLOCFLAGS,
							ppsFWTQ3DContextMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ3DContextKM: Failed to allocate firmware TQ 3D context (%u)",
				eError));
		goto fail_contextalloc;
	}
	psTmpCleanup->psFWTQ3DContextMemDesc = *ppsFWTQ3DContextMemDesc;
	psTmpCleanup->psDeviceNode = psDeviceNode;

	/*
		Temporarily map the firmware TQ 3D context to the kernel.
	*/
	eError = DevmemAcquireCpuVirtAddr(*ppsFWTQ3DContextMemDesc,
                                      (IMG_VOID **)&psFWTQ3DContext);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ3DContextKM: Failed to map firmware TQ 3D context (%u)",
				eError));
		goto fail_cpuvirtacquire;
	}

	/*
		Allocate device memory for the firmware GPU context suspend state.
		Note: the FW reads/writes the state to memory by accessing the GPU register interface.
	*/
	PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");

	eError = DevmemFwAllocate(psDevInfo,
							sizeof(RGXFWIF_3DCTX_STATE),
							RGX_FWCOMCTX_ALLOCFLAGS,
							ppsFWTQ3DContextStateMemDesc);

	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ3DContextKM: Failed to allocate firmware GPU context suspend state (%u)",
				eError));
		goto fail_contextsuspendalloc;
	}
	psTmpCleanup->psFWTQ3DContextStateMemDesc = *ppsFWTQ3DContextStateMemDesc;

	/* 
	 * Create the FW framework buffer
	 */
	eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, & psFWFrameworkMemDesc, ui32FrameworkRegisterSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ3DContextKM: Failed to allocate firmware GPU framework state (%u)",
				eError));
		goto fail_frameworkcreate;
	}
	
	psTmpCleanup->psFWFrameworkMemDesc = psFWFrameworkMemDesc;

	/* Copy the Framework client data into the framework buffer */
	eError = PVRSRVRGXFrameworkCopyRegisters(psFWFrameworkMemDesc, pbyFrameworkRegisters, ui32FrameworkRegisterSize);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ3DContextKM: Failed to populate the framework buffer (%u)",
				eError));
		goto fail_frameworkcopy;
	}

	/* Init TQ/3D FW common context */
	eError = RGXInitFWCommonContext(psFWTQ3DContext,
									psTQ3DCCBMemDesc,
									psTQ3DCCBCtlMemDesc,
									hMemCtxPrivData,
									psFWFrameworkMemDesc,
									ui32Priority,
									&sMCUFenceAddr,
									&psTmpCleanup->sFWComContextCleanup);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateTQ3DContextKM: Failed to init firmware common context (%u)",
				eError));
		goto fail_contextinit;
	}

	/*
	 * Set the firmware GPU context state buffer.
	 *
	 * The common context stores a dword pointer (an FW address), so the generic
	 * buffer can be cast to the correct 3D (3D/TQ = normal 3D) state structure
	 * type in the FW.
	 */
	RGXSetFirmwareAddress(&psFWTQ3DContext->psContextState,
								   *ppsFWTQ3DContextStateMemDesc,
								   0,
								   RFW_FWADDR_FLAG_NONE);

	/*
	 * Dump the TQ3D and the memory contexts
	 */
	PDUMPCOMMENT("Dump FWTQ3DContext");
	DevmemPDumpLoadMem(*ppsFWTQ3DContextMemDesc, 0, sizeof(*psFWTQ3DContext), PDUMP_FLAGS_CONTINUOUS);

	/*
	 * Dump the FW TQ/3D context suspend state buffer
	 */
	PDUMPCOMMENT("Dump FWTQ3DContextState");
	DevmemPDumpLoadMem(*ppsFWTQ3DContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);

	/* Release address acquired above. */
	DevmemReleaseCpuVirtAddr(*ppsFWTQ3DContextMemDesc);

	return PVRSRV_OK;

fail_contextinit:
fail_frameworkcopy:
	DevmemFwFree(psFWFrameworkMemDesc);
fail_frameworkcreate:
	DevmemFwFree(*ppsFWTQ3DContextStateMemDesc);
fail_contextsuspendalloc:
	DevmemReleaseCpuVirtAddr(*ppsFWTQ3DContextMemDesc);
fail_cpuvirtacquire:
	DevmemFwFree(*ppsFWTQ3DContextMemDesc);
fail_contextalloc:
	SyncPrimFree(psTmpCleanup->psCleanupSync);
fail_syncalloc:
	OSFreeMem(psTmpCleanup);
	return eError;
}