Example 1
0
/*
 *  ======== DMM_MapMemory ========
 *  Purpose:
 *      Add a mapping block to the reserved chunk. DMM assumes that this block
 *  will be mapped in the DSP/IVA's address space. DMM returns an error if a
 *  mapping overlaps another one. This function stores the info that will be
 *  required later while unmapping the block.
 */
DSP_STATUS DMM_MapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 size)
{
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	struct MapPage *chunk;
	DSP_STATUS status = DSP_SOK;

	GT_3trace(DMM_debugMask, GT_ENTER,
		 "Entered DMM_MapMemory () hDmmMgr %x, "
		 "addr %x, size %x\n", hDmmMgr, addr, size);
	SYNC_EnterCS(pDmmObj->hDmmLock);
	/* Find the Reserved memory chunk containing the DSP block to
	 * be mapped */
	chunk = (struct MapPage *)GetRegion(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped', leave the 'reserved' info as-is */
		chunk->bMapped = true;
		chunk->MappedSize = (size/PG_SIZE_4K);
	} else
		status = DSP_ENOTFOUND;
	SYNC_LeaveCS(pDmmObj->hDmmLock);
	GT_2trace(DMM_debugMask, GT_4CLASS,
		 "Leaving DMM_MapMemory status %x, chunk %x\n",
		status, chunk);
	return status;
}
Example 2
0
/*
 *  ======== DMM_UnMapMemory ========
 *  Purpose:
 *      Remove the mapped block from the reserved chunk.
 */
DSP_STATUS DMM_UnMapMemory(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 *pSize)
{
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	struct MapPage *chunk;
	DSP_STATUS status = DSP_SOK;

	GT_3trace(DMM_debugMask, GT_ENTER,
		 "Entered DMM_UnMapMemory () hDmmMgr %x, "
		 "addr %x, pSize %x\n", hDmmMgr, addr, pSize);
	SYNC_EnterCS(pDmmObj->hDmmLock);
	chunk = GetMappedRegion(addr);
	if (chunk == NULL)
		status = DSP_ENOTFOUND;

	if (DSP_SUCCEEDED(status)) {
		/* Unmap the region */
		*pSize = chunk->MappedSize * PG_SIZE_4K;
		chunk->bMapped = false;
		chunk->MappedSize = 0;
	}
	SYNC_LeaveCS(pDmmObj->hDmmLock);
	GT_3trace(DMM_debugMask, GT_ENTER,
		 "Leaving DMM_UnMapMemory status %x, chunk"
		 " %x,  *pSize %x\n", status, chunk, *pSize);

	return status;
}
Example 3
0
/*
 *  ======== NTFY_Notify ========
 *  Purpose:
 *      Execute notify function (signal event) for every
 *      element in the notification list that is to be notified about the
 *      event specified in uEventMask.
 */
void NTFY_Notify(struct NTFY_OBJECT *hNtfy, u32 uEventMask)
{
	struct NOTIFICATION *pNotify;

	DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE));

	/*
	 *  Go through notifyList and notify all clients registered for
	 *  uEventMask events.
	 */

	(void) SYNC_EnterCS(hNtfy->hSync);

	pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList);
	while (pNotify != NULL) {
		if (pNotify->uEventMask & uEventMask) {
			/* Notify */
			if (pNotify->uNotifyType == DSP_SIGNALEVENT)
				(void)SYNC_SetEvent(pNotify->hSync);

		}
		pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList,
			  (struct list_head *)pNotify);
	}

	(void) SYNC_LeaveCS(hNtfy->hSync);
}
Example 4
0
/*
 *  ======== WMD_MSG_DeleteQueue ========
 *      Delete a MSG queue allocated in WMD_MSG_CreateQueue.
 */
void WMD_MSG_DeleteQueue(struct MSG_QUEUE *hMsgQueue)
{
	struct MSG_MGR *hMsgMgr = hMsgQueue->hMsgMgr;
	u32 refCount;

	DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE));
	hMsgQueue->fDone = true;
	 /*  Unblock all threads blocked in MSG_Get() or MSG_Put().  */
	refCount = hMsgQueue->refCount;
	while (refCount) {
		/* Unblock thread */
		SYNC_SetEvent(hMsgQueue->hSyncDone);
		/* Wait for acknowledgement */
		SYNC_WaitOnEvent(hMsgQueue->hSyncDoneAck, SYNC_INFINITE);
		refCount = hMsgQueue->refCount;
	}
	/* Remove message queue from hMsgMgr->queueList */
	(void)SYNC_EnterCS(hMsgMgr->hSyncCS);
	LST_RemoveElem(hMsgMgr->queueList, (struct LST_ELEM *)hMsgQueue);
	/* Free the message queue object */
	DeleteMsgQueue(hMsgQueue, hMsgQueue->uMaxMsgs);
	if (!hMsgMgr->msgFreeList)
		goto func_cont;
	if (LST_IsEmpty(hMsgMgr->msgFreeList))
		SYNC_ResetEvent(hMsgMgr->hSyncEvent);
func_cont:
	(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
}
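The refCount / hSyncDone / hSyncDoneAck dance above is a generic teardown handshake: the deleter marks the queue done, wakes every blocked reader, and waits until each reader has acknowledged that it will no longer touch the queue before freeing it. Below is a minimal userspace sketch of that handshake using POSIX threads; the toy_queue type, its field names and the timing are illustrative assumptions, not part of the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Toy stand-in for the queue: a done flag plus a count of blocked users. */
struct toy_queue {
	pthread_mutex_t lock;
	pthread_cond_t  wake;      /* plays the role of hSyncDone    */
	pthread_cond_t  ack;       /* plays the role of hSyncDoneAck */
	bool            done;      /* cf. fDone                      */
	unsigned int    ref_count; /* threads currently blocked      */
};

/* Consumer: blocks until data or shutdown; here it only ever sees shutdown. */
static void *toy_get(void *arg)
{
	struct toy_queue *q = arg;

	pthread_mutex_lock(&q->lock);
	q->ref_count++;
	while (!q->done)                  /* would also wake on "message ready" */
		pthread_cond_wait(&q->wake, &q->lock);
	q->ref_count--;
	pthread_cond_signal(&q->ack);     /* "I will not touch the queue again" */
	pthread_mutex_unlock(&q->lock);
	return NULL;
}

/* Deleter: set done, wake everyone, then wait until ref_count drains to 0. */
static void toy_delete(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->done = true;
	pthread_cond_broadcast(&q->wake);
	while (q->ref_count != 0)
		pthread_cond_wait(&q->ack, &q->lock);
	pthread_mutex_unlock(&q->lock);
	/* Only now is it safe to free the queue's resources. */
}

int main(void)
{
	static struct toy_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wake = PTHREAD_COND_INITIALIZER,
		.ack  = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, toy_get, &q);
	usleep(10000);            /* give the consumer time to block */
	toy_delete(&q);
	pthread_join(t, NULL);
	printf("queue torn down with no blocked users\n");
	return 0;
}

The driver loops instead of broadcasting, signalling hSyncDone once per blocked thread and waiting for one acknowledgement each time; the condition-variable broadcast above collapses that into a single wake-up.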
Example 5
0
/*
 *  ======== DMM_DeleteTables ========
 *  Purpose:
 *      Delete DMM Tables.
 */
DSP_STATUS DMM_DeleteTables(struct DMM_OBJECT *hDmmMgr)
{
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	DSP_STATUS status = DSP_SOK;

	GT_1trace(DMM_debugMask, GT_ENTER,
		"Entered DMM_DeleteTables () hDmmMgr %x\n", hDmmMgr);
	DBC_Require(cRefs > 0);
	if (MEM_IsValidHandle(hDmmMgr, DMMSIGNATURE)) {
		/* Delete all DMM tables */
		SYNC_EnterCS(pDmmObj->hDmmLock);

		if (pVirtualMappingTable != NULL)
			MEM_Free(pVirtualMappingTable);

		if (pPhysicalAddrTable != NULL)
			MEM_Free(pPhysicalAddrTable);

		SYNC_LeaveCS(pDmmObj->hDmmLock);
	} else
		status = DSP_EHANDLE;
	GT_1trace(DMM_debugMask, GT_4CLASS,
		"Leaving DMM_DeleteTables status %x\n", status);
	return status;
}
Example 6
0
/*
 *  ======== NTFY_Delete ========
 *  Purpose:
 *      Free resources allocated in NTFY_Create.
 */
void NTFY_Delete(struct NTFY_OBJECT *hNtfy)
{
	struct NOTIFICATION *pNotify;

	DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE));

	/* Remove any elements remaining in list */
	if (hNtfy->notifyList) {

		(void) SYNC_EnterCS(hNtfy->hSync);
	
		while ((pNotify = (struct NOTIFICATION *)LST_GetHead(hNtfy->
								notifyList))) {
			DeleteNotify(pNotify);
		}
		DBC_Assert(LST_IsEmpty(hNtfy->notifyList));
		kfree(hNtfy->notifyList);

		(void) SYNC_LeaveCS(hNtfy->hSync);
	}

	
	if (hNtfy->hSync)
		(void)SYNC_DeleteCS(hNtfy->hSync);

	MEM_FreeObject(hNtfy);
}
Example 7
0
/*
 *  ======== WMD_CHNL_CancelIO ========
 *      Return all I/O requests to the client which have not yet been
 *      transferred.  The channel's I/O completion object is
 *      signalled, and all the I/O requests are queued as IOC's, with the
 *      status field set to CHNL_IOCSTATCANCEL.
 *      This call is typically used in abort situations, and is a prelude to
 *      CHNL_Close();
 */
DSP_STATUS WMD_CHNL_CancelIO(struct CHNL_OBJECT *hChnl)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	u32 iChnl = -1;
	short int uMode;
	struct CHNL_IRP *pChirp;
	struct CHNL_MGR *pChnlMgr = NULL;

	/* Check args: */
	if (MEM_IsValidHandle(pChnl, CHNL_SIGNATURE) && pChnl->pChnlMgr) {
		iChnl = pChnl->uId;
		uMode = pChnl->uMode;
		pChnlMgr = pChnl->pChnlMgr;
	} else {
		status = DSP_EHANDLE;
	}
	if (DSP_FAILED(status))
		goto func_end;

	/* Mark this channel as cancelled, to prevent further IORequests or
	 * dispatching. */
	SYNC_EnterCS(pChnlMgr->hCSObj);
	pChnl->dwState |= CHNL_STATECANCEL;
	if (LST_IsEmpty(pChnl->pIORequests))
		goto func_cont;

	if (pChnl->uChnlType == CHNL_PCPY) {
		/* Indicate we have no more buffers available for transfer: */
		if (CHNL_IsInput(pChnl->uMode)) {
			IO_CancelChnl(pChnlMgr->hIOMgr, iChnl);
		} else {
			/* Record that we no longer have output buffers
			 * available: */
			pChnlMgr->dwOutputMask &= ~(1 << iChnl);
		}
	}
	/* Move all IOR's to IOC queue:  */
	while (!LST_IsEmpty(pChnl->pIORequests)) {
		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIORequests);
		if (pChirp) {
			pChirp->cBytes = 0;
			pChirp->status |= CHNL_IOCSTATCANCEL;
			LST_PutTail(pChnl->pIOCompletions,
				   (struct list_head *)pChirp);
			pChnl->cIOCs++;
			pChnl->cIOReqs--;
			DBC_Assert(pChnl->cIOReqs >= 0);
		}
	}
func_cont:
	SYNC_LeaveCS(pChnlMgr->hCSObj);
func_end:
	return status;
}
Example 8
0
/*
 *  ======== REG_DeleteValue ========
 *  Deletes a registry entry value.  NOTE: A registry entry value is not the
 *  same as a registry key.
 */
DSP_STATUS REG_DeleteValue(IN CONST char *pstrValue)
{
	DSP_STATUS status;
	DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH);

	SYNC_EnterCS(reglock);
	status = regsupDeleteValue(pstrValue);
	SYNC_LeaveCS(reglock);

	return status;
}
Example 9
0
/*
 *  ======== DMM_CreateTables ========
 *  Purpose:
 *      Create a table to hold the physical addresses of the buffer pages
 *      passed by the user, and a table to hold the information about the
 *      virtual memory that is reserved for the DSP.
 */
DSP_STATUS DMM_CreateTables(struct DMM_OBJECT *hDmmMgr, u32 addr, u32 size)
{
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	DSP_STATUS status = DSP_SOK;

	GT_3trace(DMM_debugMask, GT_ENTER,
		 "Entered DMM_CreateTables () hDmmMgr %x, addr"
		 " %x, size %x\n", hDmmMgr, addr, size);
	status = DMM_DeleteTables(pDmmObj);
	if (DSP_SUCCEEDED(status)) {
		SYNC_EnterCS(pDmmObj->hDmmLock);
		dynMemMapBeg = addr;
		TableSize = (size/PG_SIZE_4K) + 1;
		/*  Create the free list */
		pVirtualMappingTable = (struct MapPage *) MEM_Calloc
		(TableSize*sizeof(struct MapPage), MEM_NONPAGED);
		if (pVirtualMappingTable == NULL)
			status = DSP_EMEMORY;
		else {
			/* This table will be used
			* to store the virtual to physical
			* address translations
			*/
			pPhysicalAddrTable = (u32 *)MEM_Calloc
				(TableSize*sizeof(u32), MEM_NONPAGED);
			GT_1trace(DMM_debugMask, GT_4CLASS,
			"DMM_CreateTables: Allocate"
			"memory for pPhysicalAddrTable=%d entries\n",
			TableSize);
			if (pPhysicalAddrTable == NULL) {
				status = DSP_EMEMORY;
				GT_0trace(DMM_debugMask, GT_7CLASS,
				    "DMM_CreateTables: Memory allocation for "
				    "pPhysicalAddrTable failed\n");
			} else {
				/* On successful allocation,
				 * all entries are zero ('free') */
				iFreeRegion = 0;
				iFreeSize = TableSize * PG_SIZE_4K;
				pVirtualMappingTable[0].RegionSize = TableSize;
			}
		}
		SYNC_LeaveCS(pDmmObj->hDmmLock);
	} else
		GT_0trace(DMM_debugMask, GT_7CLASS,
			 "DMM_CreateTables: DMM_DeleteTables"
			 "Failure\n");

	GT_1trace(DMM_debugMask, GT_4CLASS, "Leaving DMM_CreateTables status "
							"0x%x\n", status);
	return status;
}
Example 10
0
/*
 *  ======== DMM_ReserveMemory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 */
DSP_STATUS DMM_ReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 size,
							u32 *pRsvAddr)
{
	DSP_STATUS status = DSP_SOK;
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	struct MapPage *node;
	u32 rsvAddr = 0;
	u32 rsvSize = 0;

	GT_3trace(DMM_debugMask, GT_ENTER,
		 "Entered DMM_ReserveMemory () hDmmMgr %x, "
		 "size %x, pRsvAddr %x\n", hDmmMgr, size, pRsvAddr);
	SYNC_EnterCS(pDmmObj->hDmmLock);

	/* Try to get a DSP chunk from the free list */
	node = GetFreeRegion(size);
	if (node != NULL) {
		/*  DSP chunk of given size is available. */
		rsvAddr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number entries to use */
		rsvSize = size/PG_SIZE_4K;
		if (rsvSize < node->RegionSize) {
			/* Mark remainder of free region */
			node[rsvSize].bMapped = false;
			node[rsvSize].bReserved = false;
			node[rsvSize].RegionSize = node->RegionSize - rsvSize;
			node[rsvSize].MappedSize = 0;
		}
		/*  GetRegion will return first fit chunk. But we only use what
			is requested. */
		node->bMapped = false;
		node->bReserved = true;
		node->RegionSize = rsvSize;
		node->MappedSize = 0;
		/* Return the chunk's starting address */
		*pRsvAddr = rsvAddr;
	} else
		/* DSP chunk of given size is not available */
		status = DSP_EMEMORY;

	SYNC_LeaveCS(pDmmObj->hDmmLock);
	GT_3trace(DMM_debugMask, GT_4CLASS,
		 "Leaving ReserveMemory status %x, rsvAddr"
		 " %x, rsvSize %x\n", status, rsvAddr, rsvSize);
	return status;
}
Example 11
0
/*
 *  ======== DMM_UnReserveMemory ========
 *  Purpose:
 *      Free a chunk of reserved DSP/IVA address space.
 */
DSP_STATUS DMM_UnReserveMemory(struct DMM_OBJECT *hDmmMgr, u32 rsvAddr)
{
	struct DMM_OBJECT *pDmmObj = (struct DMM_OBJECT *)hDmmMgr;
	struct MapPage *chunk;
	u32 i;
	DSP_STATUS status = DSP_SOK;
	u32 chunkSize;

	GT_2trace(DMM_debugMask, GT_ENTER,
		 "Entered DMM_UnReserveMemory () hDmmMgr "
		 "%x, rsvAddr %x\n", hDmmMgr, rsvAddr);

	SYNC_EnterCS(pDmmObj->hDmmLock);

	/* Find the chunk containing the reserved address */
	chunk = GetMappedRegion(rsvAddr);
	if (chunk == NULL)
		status = DSP_ENOTFOUND;

	if (DSP_SUCCEEDED(status)) {
		/* Free all the mapped pages for this reserved region */
		i = 0;
		while (i < chunk->RegionSize) {
			if (chunk[i].bMapped) {
				/* Remove mapping from the page tables. */
				chunkSize = chunk[i].MappedSize;
				/* Clear the mapping flags */
				chunk[i].bMapped = false;
				chunk[i].MappedSize = 0;
				i += chunkSize;
			} else
				i++;
		}
		/* Clear the flags (mark the region 'free') */
		chunk->bReserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in GetRegion(), as it traverses
		 * the whole mapping table.
		 */
	}
	SYNC_LeaveCS(pDmmObj->hDmmLock);
	GT_2trace(DMM_debugMask, GT_ENTER,
		 "Leaving DMM_UnReserveMemory status %x"
		 " chunk %x\n", status, chunk);
	return status;
}
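All of the DMM routines above operate on a single flat MapPage table with one entry per 4 KiB page of DSP virtual address space: a region's first entry carries RegionSize, bReserved and bMapped, reservations are found first-fit, and free regions are only coalesced later when the table is re-walked. The following is a minimal userspace sketch of that bookkeeping, assuming a tiny fixed table, no locking and no coalescing; the sizes and helper names are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

#define PG_SIZE_4K	4096u
#define TABLE_SIZE	64u	/* 64 pages of toy DSP address space */

struct map_page {
	unsigned int region_size;	/* pages; valid in a region's first entry */
	unsigned int mapped_size;	/* pages, when mapped */
	bool reserved;
	bool mapped;
};

static struct map_page table[TABLE_SIZE] = {
	[0] = { .region_size = TABLE_SIZE },	/* one big free region */
};

/* First-fit search for a free region of at least 'pages' pages. */
static int reserve(unsigned int pages)
{
	unsigned int i;

	for (i = 0; i < TABLE_SIZE; i += table[i].region_size) {
		if (table[i].reserved || table[i].region_size < pages)
			continue;
		if (pages < table[i].region_size) {
			/* Split: the remainder stays a free region. */
			table[i + pages].region_size =
					table[i].region_size - pages;
		}
		table[i].region_size = pages;
		table[i].reserved = true;
		return (int)i;			/* index doubles as page number */
	}
	return -1;				/* cf. DSP_EMEMORY */
}

static void map_pages(int idx, unsigned int pages)
{
	table[idx].mapped = true;		/* cf. DMM_MapMemory */
	table[idx].mapped_size = pages;
}

static void unreserve(int idx)
{
	table[idx].mapped = false;		/* cf. DMM_UnMapMemory */
	table[idx].mapped_size = 0;
	table[idx].reserved = false;		/* cf. DMM_UnReserveMemory */
}

/* Walk regions by striding over region_size, as DMM_MemMapDump does. */
static void dump(void)
{
	unsigned int i, free_pages = 0;

	for (i = 0; i < TABLE_SIZE; i += table[i].region_size)
		if (!table[i].reserved)
			free_pages += table[i].region_size;
	printf("free: %u KiB of %u KiB\n",
	       free_pages * PG_SIZE_4K / 1024, TABLE_SIZE * PG_SIZE_4K / 1024);
}

int main(void)
{
	int r = reserve(8);	/* reserve 8 pages (32 KiB) */

	map_pages(r, 8);	/* then mark them mapped */
	dump();
	unreserve(r);
	dump();
	return 0;
}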
Example 12
0
/*
 *  ======== REG_EnumValue ========
 *  Enumerates a registry key and retrieves the values stored under that key.
 *  We will assume the input pdwValueSize is smaller than
 *  REG_MAXREGPATHLENGTH for implementation purposes.
 */
DSP_STATUS REG_EnumValue(IN u32 dwIndex,
			 IN CONST char *pstrKey, IN OUT char *pstrValue,
			 IN OUT u32 *pdwValueSize, IN OUT char *pstrData,
			 IN OUT u32 *pdwDataSize)
{
	DSP_STATUS status;

	DBC_Require(pstrKey && pstrValue && pdwValueSize && pstrData &&
		    pdwDataSize);
	DBC_Require(*pdwValueSize <= REG_MAXREGPATHLENGTH);
	DBC_Require(strlen(pstrKey) < REG_MAXREGPATHLENGTH);

	SYNC_EnterCS(reglock);
	status = regsupEnumValue(dwIndex, pstrKey, pstrValue, pdwValueSize,
				 pstrData, pdwDataSize);
	SYNC_LeaveCS(reglock);

	return status;
}
Example 13
0
/*
 *  ======== REG_SetValue ========
 *  Set a value in the registry.
 */
DSP_STATUS REG_SetValue(IN CONST char *pstrValue, IN u8 *pbData,
			IN u32 dwDataSize)
{
	DSP_STATUS status;

	DBC_Require(pstrValue && pbData);
	DBC_Require(dwDataSize > 0);
	DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH);

	SYNC_EnterCS(reglock);
	/*
	 * We need to use regsup calls; for now we don't need the key handle
	 * or the subkey, all we need is the value to look up.
	 */
	status = regsupSetValue((char *)pstrValue, pbData, dwDataSize);
	SYNC_LeaveCS(reglock);

	return status;
}
Example 14
0
/*
 *  ======== REG_GetValue ========
 *  Retrieve a value from the registry.
 */
DSP_STATUS REG_GetValue(IN CONST char *pstrValue, OUT u8 *pbData,
			IN OUT u32 *pdwDataSize)
{
	DSP_STATUS status;

	DBC_Require(pstrValue && pbData);
	DBC_Require(strlen(pstrValue) < REG_MAXREGPATHLENGTH);

	SYNC_EnterCS(reglock);
	/*
	 * We need to use regsup calls; for now we don't need the key handle
	 * or the subkey, all we need is the value to look up.
	 */
	if (regsupGetValue((char *)pstrValue, pbData, pdwDataSize) == DSP_SOK)
		status = DSP_SOK;
	else
		status = DSP_EFAIL;

	SYNC_LeaveCS(reglock);
	return status;
}
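The REG_* functions above (REG_DeleteValue, REG_EnumValue, REG_SetValue, REG_GetValue) are thin wrappers whose only job is to serialize the underlying regsup calls under the shared reglock critical section. A minimal userspace sketch of that pattern follows, guarding a toy key/value store with one pthread mutex; the store layout, key name and function names are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES	8
#define MAX_NAME	32
#define MAX_DATA	64

struct entry {
	char name[MAX_NAME];
	char data[MAX_DATA];
	int used;
};

static struct entry store[MAX_ENTRIES];
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER; /* cf. reglock */

/* Each accessor: take the lock, do the unlocked "regsup" work, release it. */
static int reg_set(const char *name, const char *data)
{
	int i, slot = -1;

	pthread_mutex_lock(&reg_lock);
	for (i = 0; i < MAX_ENTRIES; i++) {
		if (store[i].used && !strcmp(store[i].name, name)) {
			slot = i;		/* overwrite existing value */
			break;
		}
		if (!store[i].used && slot < 0)
			slot = i;		/* remember first free slot */
	}
	if (slot >= 0) {
		snprintf(store[slot].name, MAX_NAME, "%s", name);
		snprintf(store[slot].data, MAX_DATA, "%s", data);
		store[slot].used = 1;
	}
	pthread_mutex_unlock(&reg_lock);
	return slot >= 0 ? 0 : -1;
}

static int reg_get(const char *name, char *out, size_t outlen)
{
	int i, ret = -1;

	pthread_mutex_lock(&reg_lock);
	for (i = 0; i < MAX_ENTRIES; i++) {
		if (store[i].used && !strcmp(store[i].name, name)) {
			snprintf(out, outlen, "%s", store[i].data);
			ret = 0;
			break;
		}
	}
	pthread_mutex_unlock(&reg_lock);
	return ret;
}

int main(void)
{
	char buf[MAX_DATA];

	reg_set("ToyDriverName", "omap-dsp");
	if (reg_get("ToyDriverName", buf, sizeof(buf)) == 0)
		printf("ToyDriverName = %s\n", buf);
	return 0;
}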
Example 15
0
u32 DMM_MemMapDump(struct DMM_OBJECT *hDmmMgr)
{
	struct MapPage *curNode = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	SYNC_EnterCS(hDmmMgr->hDmmLock);

	if (pVirtualMappingTable != NULL) {
		for (i = 0; i < TableSize; i +=
				pVirtualMappingTable[i].RegionSize) {
			curNode = pVirtualMappingTable + i;
			if (curNode->bReserved == TRUE) {
				/*printk("RESERVED size = 0x%x, "
					"Map size = 0x%x\n",
					(curNode->RegionSize * PG_SIZE_4K),
					(curNode->bMapped == false) ? 0 :
					(curNode->MappedSize * PG_SIZE_4K));
*/
			} else {
/*				printk("UNRESERVED size = 0x%x\n",
					(curNode->RegionSize * PG_SIZE_4K));
*/
				freemem += (curNode->RegionSize * PG_SIZE_4K);
				if (curNode->RegionSize > bigsize)
					bigsize = curNode->RegionSize;
			}
		}
	}
	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
			freemem/(1024*1024));
	printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
			(((TableSize * PG_SIZE_4K)-freemem))/(1024*1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
			(bigsize*PG_SIZE_4K/(1024*1024)));
	SYNC_LeaveCS(hDmmMgr->hDmmLock);

	return 0;
}
Example 16
0
/*
 *  ======== WMD_CHNL_AddIOReq ========
 *      Enqueue an I/O request for data transfer on a channel to the DSP.
 *      The direction (mode) is specified in the channel object. Note the DSP
 *      address is specified for channels opened in direct I/O mode.
 */
DSP_STATUS WMD_CHNL_AddIOReq(struct CHNL_OBJECT *hChnl, void *pHostBuf,
			    u32 cBytes, u32 cBufSize,
			    OPTIONAL u32 dwDspAddr, u32 dwArg)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	struct CHNL_IRP *pChirp = NULL;
	struct WMD_DEV_CONTEXT *dev_ctxt;
	struct DEV_OBJECT *dev_obj;
	u32 dwState;
	bool fIsEOS;
	struct CHNL_MGR *pChnlMgr = pChnl->pChnlMgr;
	u8 *pHostSysBuf = NULL;
	bool fSchedDPC = false;
	u16 wMbVal = 0;

	fIsEOS = (cBytes == 0);
	/* Validate args:  */
	if (pHostBuf == NULL) {
		status = DSP_EPOINTER;
	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
		status = DSP_EHANDLE;
	} else if (fIsEOS && CHNL_IsInput(pChnl->uMode)) {
		status = CHNL_E_NOEOS;
	} else {
		/* Check the channel state: only queue chirp if channel state
		 * allows */
		dwState = pChnl->dwState;
		if (dwState != CHNL_STATEREADY) {
			if (dwState & CHNL_STATECANCEL)
				status = CHNL_E_CANCELLED;
			else if ((dwState & CHNL_STATEEOS)
				   && CHNL_IsOutput(pChnl->uMode))
				status = CHNL_E_EOS;
			else
				/* No other possible states left: */
				DBC_Assert(0);
		}
	}

	dev_obj = DEV_GetFirst();
	DEV_GetWMDContext(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = DSP_EHANDLE;

	if (DSP_FAILED(status))
		goto func_end;

	if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1 && pHostBuf) {
		if (!(pHostBuf < (void *)USERMODE_ADDR)) {
			pHostSysBuf = pHostBuf;
			goto func_cont;
		}
		/* if addr in user mode, then copy to kernel space */
		pHostSysBuf = MEM_Alloc(cBufSize, MEM_NONPAGED);
		if (pHostSysBuf == NULL) {
			status = DSP_EMEMORY;
			goto func_end;
		}
		if (CHNL_IsOutput(pChnl->uMode)) {
			status = copy_from_user(pHostSysBuf, pHostBuf,
						cBufSize);
			if (status) {
				kfree(pHostSysBuf);
				pHostSysBuf = NULL;
				status = DSP_EPOINTER;
				goto func_end;
			}
		}
	}
func_cont:
	/* Mailbox IRQ is disabled to avoid race condition with DMA/ZCPY
	 * channels. DPCCS is held to avoid race conditions with PCPY channels.
	 * If DPC is scheduled in process context (IO_Schedule) and any
	 * non-mailbox interrupt occurs, that DPC will run and break CS. Hence
	 * we disable ALL DPCs. We will try to disable ONLY IO DPC later.  */
	SYNC_EnterCS(pChnlMgr->hCSObj);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (pChnl->uChnlType == CHNL_PCPY) {
		/* This is a processor-copy channel. */
		if (DSP_SUCCEEDED(status) && CHNL_IsOutput(pChnl->uMode)) {
			/* Check buffer size on output channels for fit. */
			if (cBytes > IO_BufSize(pChnl->pChnlMgr->hIOMgr))
				status = CHNL_E_BUFSIZE;

		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Get a free chirp: */
		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pFreeList);
		if (pChirp == NULL)
			status = CHNL_E_NOIORPS;

	}
	if (DSP_SUCCEEDED(status)) {
		/* Enqueue the chirp on the chnl's IORequest queue: */
		pChirp->pHostUserBuf = pChirp->pHostSysBuf = pHostBuf;
		if (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)
			pChirp->pHostSysBuf = pHostSysBuf;

		/*
		 * Note: for dma chans dwDspAddr contains dsp address
		 * of SM buffer.
		 */
		DBC_Assert(pChnlMgr->uWordSize != 0);
		/* DSP address */
		pChirp->uDspAddr = dwDspAddr / pChnlMgr->uWordSize;
		pChirp->cBytes = cBytes;
		pChirp->cBufSize = cBufSize;
		/* Only valid for output channel */
		pChirp->dwArg = dwArg;
		pChirp->status = (fIsEOS ? CHNL_IOCSTATEOS :
						CHNL_IOCSTATCOMPLETE);
		LST_PutTail(pChnl->pIORequests, (struct list_head *)pChirp);
		pChnl->cIOReqs++;
		DBC_Assert(pChnl->cIOReqs <= pChnl->cChirps);
		/* If end of stream, update the channel state to prevent
		 * more IOR's: */
		if (fIsEOS)
			pChnl->dwState |= CHNL_STATEEOS;

		/* Legacy DSM Processor-Copy */
		DBC_Assert(pChnl->uChnlType == CHNL_PCPY);
		/* Request IO from the DSP */
		IO_RequestChnl(pChnlMgr->hIOMgr, pChnl,
			(CHNL_IsInput(pChnl->uMode) ? IO_INPUT : IO_OUTPUT),
								&wMbVal);
		fSchedDPC = true;
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	SYNC_LeaveCS(pChnlMgr->hCSObj);
	if (wMbVal != 0)
		IO_IntrDSP2(pChnlMgr->hIOMgr, wMbVal);

	/* Schedule a DPC, to do the actual data transfer: */
	if (fSchedDPC)
		IO_Schedule(pChnlMgr->hIOMgr);

func_end:
	return status;
}
Example 17
0
/*
 *  ======== WMD_CHNL_Open ========
 *      Open a new half-duplex channel to the DSP board.
 */
DSP_STATUS WMD_CHNL_Open(OUT struct CHNL_OBJECT **phChnl,
			 struct CHNL_MGR *hChnlMgr, short int uMode,
			 u32 uChnlId, CONST IN struct CHNL_ATTRS *pAttrs)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_MGR *pChnlMgr = hChnlMgr;
	struct CHNL_OBJECT *pChnl = NULL;
	struct SYNC_ATTRS *pSyncAttrs = NULL;
	struct SYNC_OBJECT *hSyncEvent = NULL;
	/* Ensure DBC requirements:  */
	DBC_Require(phChnl != NULL);
	DBC_Require(pAttrs != NULL);
	DBC_Require(hChnlMgr != NULL);
	*phChnl = NULL;

	/* Validate Args:  */
	if (pAttrs->uIOReqs == 0) {
		status = DSP_EINVALIDARG;
	} else {
		if (!MEM_IsValidHandle(hChnlMgr, CHNL_MGRSIGNATURE)) {
			status = DSP_EHANDLE;
		} else {
			if (uChnlId != CHNL_PICKFREE) {
				if (uChnlId >= pChnlMgr->cChannels)
					status = CHNL_E_BADCHANID;
				else if (pChnlMgr->apChannel[uChnlId] !=
					  NULL)
					status = CHNL_E_CHANBUSY;
			} else {
				/* Check for free channel */
				status = SearchFreeChannel(pChnlMgr, &uChnlId);
			}
		}
	}
	if (DSP_FAILED(status))
		goto func_end;

	DBC_Assert(uChnlId < pChnlMgr->cChannels);
	/* Create channel object:  */
	MEM_AllocObject(pChnl, struct CHNL_OBJECT, 0x0000);
	if (!pChnl) {
		status = DSP_EMEMORY;
		goto func_end;
	}
	/* Protect queues from IO_DPC: */
	pChnl->dwState = CHNL_STATECANCEL;
	/* Allocate initial IOR and IOC queues: */
	pChnl->pFreeList = CreateChirpList(pAttrs->uIOReqs);
	pChnl->pIORequests = CreateChirpList(0);
	pChnl->pIOCompletions = CreateChirpList(0);
	pChnl->cChirps = pAttrs->uIOReqs;
	pChnl->cIOCs = 0;
	pChnl->cIOReqs = 0;
	status = SYNC_OpenEvent(&hSyncEvent, pSyncAttrs);
	if (DSP_SUCCEEDED(status))
		status = NTFY_Create(&pChnl->hNtfy);

	if (DSP_SUCCEEDED(status)) {
		if (pChnl->pIOCompletions && pChnl->pIORequests &&
		   pChnl->pFreeList) {
			/* Initialize CHNL object fields:    */
			pChnl->pChnlMgr = pChnlMgr;
			pChnl->uId = uChnlId;
			pChnl->uMode = uMode;
			pChnl->hUserEvent = hSyncEvent;	/* for Linux */
			pChnl->hSyncEvent = hSyncEvent;
			/* Get the process handle */
			pChnl->hProcess = current->tgid;
			pChnl->pCBArg = 0;
			pChnl->cBytesMoved = 0;
			/* Default to proc-copy */
			pChnl->uChnlType = CHNL_PCPY;
		} else {
			status = DSP_EMEMORY;
		}
	}

	if (DSP_FAILED(status)) {
		/* Free memory */
		if (pChnl->pIOCompletions) {
			FreeChirpList(pChnl->pIOCompletions);
			pChnl->pIOCompletions = NULL;
			pChnl->cIOCs = 0;
		}
		if (pChnl->pIORequests) {
			FreeChirpList(pChnl->pIORequests);
			pChnl->pIORequests = NULL;
		}
		if (pChnl->pFreeList) {
			FreeChirpList(pChnl->pFreeList);
			pChnl->pFreeList = NULL;
		}
		if (hSyncEvent) {
			SYNC_CloseEvent(hSyncEvent);
			hSyncEvent = NULL;
		}
		if (pChnl->hNtfy) {
			NTFY_Delete(pChnl->hNtfy);
			pChnl->hNtfy = NULL;
		}
		MEM_FreeObject(pChnl);
	} else {
		/* Insert channel object in channel manager: */
		pChnlMgr->apChannel[pChnl->uId] = pChnl;
		SYNC_EnterCS(pChnlMgr->hCSObj);
		pChnlMgr->cOpenChannels++;
		SYNC_LeaveCS(pChnlMgr->hCSObj);
		/* Return result... */
		pChnl->dwSignature = CHNL_SIGNATURE;
		pChnl->dwState = CHNL_STATEREADY;
		*phChnl = pChnl;
	}
func_end:
	DBC_Ensure((DSP_SUCCEEDED(status) &&
		  MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) ||
		  (*phChnl == NULL));
	return status;
}
Example 18
0
/*
 *  ======== WMD_CHNL_GetIOC ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut,
			  OUT struct CHNL_IOC *pIOC)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	struct CHNL_IRP *pChirp;
	DSP_STATUS statSync;
	bool fDequeueIOC = true;
	struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 };
	u8 *pHostSysBuf = NULL;
	struct WMD_DEV_CONTEXT *dev_ctxt;
	struct DEV_OBJECT *dev_obj;

	/* Check args: */
	if (pIOC == NULL) {
		status = DSP_EPOINTER;
	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
		status = DSP_EHANDLE;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IsEmpty(pChnl->pIOCompletions))
			status = CHNL_E_NOIOC;

	}

	dev_obj = DEV_GetFirst();
	DEV_GetWMDContext(dev_obj, &dev_ctxt);
	if (!dev_ctxt)
		status = DSP_EHANDLE;

	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut != CHNL_IOCNOWAIT && LST_IsEmpty(pChnl->pIOCompletions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut);
		if (statSync == DSP_ETIMEOUT) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			fDequeueIOC = false;
		} else if (statSync == DSP_EFAIL) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes.  */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IsEmpty(pChnl->pIOCompletions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				fDequeueIOC = false;
			}
		}
	}
	/* See comment in AddIOReq */
	SYNC_EnterCS(pChnl->pChnlMgr->hCSObj);
	omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
	if (fDequeueIOC) {
		/* Dequeue IOC and set pIOC; */
		DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions));
		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIOCompletions);
		/* Update pIOC from channel state and chirp: */
		if (pChirp) {
			pChnl->cIOCs--;
			/*  If this is a zero-copy channel, then set IOC's pBuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later.  */
			{
				pHostSysBuf = pChirp->pHostSysBuf;
				ioc.pBuf = pChirp->pHostUserBuf;
			}
			ioc.cBytes = pChirp->cBytes;
			ioc.cBufSize = pChirp->cBufSize;
			ioc.dwArg = pChirp->dwArg;
			ioc.status |= pChirp->status;
			/* Place the used chirp on the free list: */
			LST_PutTail(pChnl->pFreeList,
					(struct list_head *)pChirp);
		} else {
			ioc.pBuf = NULL;
			ioc.cBytes = 0;
		}
	} else {
		ioc.pBuf = NULL;
		ioc.cBytes = 0;
		ioc.dwArg = 0;
		ioc.cBufSize = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IsEmpty(pChnl->pIOCompletions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  WMD_CHNL_GetIOC. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      SYNC_SetEvent(pChnl->hSyncEvent);
		 *  } */
		SYNC_SetEvent(pChnl->hSyncEvent);
	} else {
		/* else, if list is empty, ensure event is reset. */
		SYNC_ResetEvent(pChnl->hSyncEvent);
	}
	omap_mbox_enable_irq(dev_ctxt->mbox, IRQ_RX);
	SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj);
	if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) {
		if (!(ioc.pBuf < (void *) USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!pHostSysBuf || !ioc.pBuf) {
			status = DSP_EPOINTER;
			goto func_cont;
		}
		if (!CHNL_IsInput(pChnl->uMode))
			goto func_cont1;

		/*pHostUserBuf */
		status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes);
		if (status) {
			if (current->flags & PF_EXITING)
				status = 0;
		}
		if (status)
			status = DSP_EPOINTER;
func_cont1:
		kfree(pHostSysBuf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	return status;
}
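WMD_CHNL_AddIOReq, WMD_CHNL_CancelIO and WMD_CHNL_GetIOC all move the same CHNL_IRP ("chirp") records between three per-channel lists: a free list, an I/O-request list and an I/O-completion list, with CancelIO stamping a cancel status on every still-pending request. The sketch below reproduces just that recycling in plain C with a toy singly linked list; the status bits, list type and function names are illustrative stand-ins, and all locking, mailbox and user-copy handling is left out.

#include <stdio.h>
#include <stddef.h>

enum io_status { IO_COMPLETE = 1, IO_CANCELLED = 2 };

struct chirp {
	struct chirp *next;
	size_t        bytes;
	int           status;
};

struct list { struct chirp *head, *tail; };

static void put_tail(struct list *l, struct chirp *c)
{
	c->next = NULL;
	if (l->tail)
		l->tail->next = c;
	else
		l->head = c;
	l->tail = c;
}

static struct chirp *get_head(struct list *l)
{
	struct chirp *c = l->head;

	if (c) {
		l->head = c->next;
		if (!l->head)
			l->tail = NULL;
	}
	return c;
}

/* The three per-channel queues a chirp cycles through. */
static struct list free_list, requests, completions;

static int add_io_req(size_t bytes)          /* cf. WMD_CHNL_AddIOReq */
{
	struct chirp *c = get_head(&free_list);

	if (!c)
		return -1;                   /* cf. CHNL_E_NOIORPS */
	c->bytes = bytes;
	c->status = IO_COMPLETE;
	put_tail(&requests, c);
	return 0;
}

static void cancel_io(void)                  /* cf. WMD_CHNL_CancelIO */
{
	struct chirp *c;

	while ((c = get_head(&requests))) {
		c->bytes = 0;
		c->status |= IO_CANCELLED;
		put_tail(&completions, c);
	}
}

static void get_ioc(void)                    /* cf. WMD_CHNL_GetIOC */
{
	struct chirp *c = get_head(&completions);

	if (c) {
		printf("completion: %zu bytes, status 0x%x\n",
		       c->bytes, c->status);
		put_tail(&free_list, c);     /* recycle the chirp */
	}
}

int main(void)
{
	static struct chirp pool[4];
	size_t i;

	for (i = 0; i < 4; i++)
		put_tail(&free_list, &pool[i]);

	add_io_req(128);
	add_io_req(256);
	cancel_io();                         /* both requests become cancelled IOCs */
	get_ioc();
	get_ioc();
	return 0;
}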
Example 19
0
/*
 *  ======== NTFY_Register ========
 *  Purpose:
 *      Add a notification element to the list. If the notification is already
 *      registered, and uEventMask != 0, the notification will get posted for
 *      events specified in the new event mask. If the notification is already
 *      registered and uEventMask == 0, the notification will be unregistered.
 */
DSP_STATUS NTFY_Register(struct NTFY_OBJECT *hNtfy,
			 struct DSP_NOTIFICATION *hNotification,
			 u32 uEventMask, u32 uNotifyType)
{
	struct NOTIFICATION *pNotify;
	struct SYNC_ATTRS syncAttrs;
	DSP_STATUS status = DSP_SOK;

	DBC_Require(MEM_IsValidHandle(hNtfy, NTFY_SIGNATURE));

	if (hNotification == NULL)
		status = DSP_EHANDLE;

	/* Return DSP_ENOTIMPL if uNotifyType is not supported */
	if (DSP_SUCCEEDED(status)) {
		if (!IsValidNotifyMask(uNotifyType))
			status = DSP_ENOTIMPL;

	}

	if (DSP_FAILED(status))
		return status;

	(void)SYNC_EnterCS(hNtfy->hSync);

	pNotify = (struct NOTIFICATION *)LST_First(hNtfy->notifyList);
	while (pNotify != NULL) {
		/* If there is more than one notification type, each
		 * type may require its own handler code.  */

		if (hNotification->handle == pNotify->hSync) {
			/* found */
			break;
		}
		pNotify = (struct NOTIFICATION *)LST_Next(hNtfy->notifyList,
			  (struct list_head *)pNotify);
	}
	if (pNotify == NULL) {
		/* Not registered */
		if (uEventMask == 0) {
			status = DSP_EVALUE;
		} else {
			/* Allocate NOTIFICATION object, add to list */
			pNotify = MEM_Calloc(sizeof(struct NOTIFICATION),
					     MEM_PAGED);
			if (pNotify == NULL)
				status = DSP_EMEMORY;

		}
		if (DSP_SUCCEEDED(status)) {
			LST_InitElem((struct list_head *)pNotify);
			 /* If there is more than one notification type, each
			 * type may require its own handler code. */
			status = SYNC_OpenEvent(&pNotify->hSync, &syncAttrs);
			hNotification->handle = pNotify->hSync;

			if (DSP_SUCCEEDED(status)) {
				pNotify->uEventMask = uEventMask;
				pNotify->uNotifyType = uNotifyType;
				LST_PutTail(hNtfy->notifyList,
					   (struct list_head *)pNotify);
			} else {
				DeleteNotify(pNotify);
			}
		}
	} else {
		/* Found in list */
		if (uEventMask == 0) {
			/* Remove from list and free */
			LST_RemoveElem(hNtfy->notifyList,
				      (struct list_head *)pNotify);
			DeleteNotify(pNotify);
		} else {
			/* Update notification mask (type shouldn't change) */
			pNotify->uEventMask = uEventMask;
		}
	}
	(void)SYNC_LeaveCS(hNtfy->hSync);
	return status;
}
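NTFY_Register and NTFY_Notify (Example 3) together form a small observer list: each client registers an event object with an event mask, re-registering updates the mask, registering with a mask of zero unregisters, and NTFY_Notify signals every client whose mask intersects the posted events. A minimal userspace sketch of that mask matching follows, using a plain flag in place of a SYNC event object; the fixed-size client array and the names are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define MAX_CLIENTS	8

struct notification {
	unsigned int event_mask;   /* which events this client cares about */
	bool         signalled;    /* stands in for SYNC_SetEvent()        */
	bool         used;
};

static struct notification clients[MAX_CLIENTS];

/* Register (mask != 0) or unregister (mask == 0) a client slot. */
static int ntfy_register(int id, unsigned int mask)
{
	if (id < 0 || id >= MAX_CLIENTS)
		return -1;
	if (mask == 0) {
		clients[id].used = false;       /* cf. removal from notifyList */
		return 0;
	}
	clients[id].event_mask = mask;          /* new or updated mask */
	clients[id].signalled = false;
	clients[id].used = true;
	return 0;
}

/* Signal every registered client interested in any of the posted events. */
static void ntfy_notify(unsigned int events)
{
	int i;

	for (i = 0; i < MAX_CLIENTS; i++)
		if (clients[i].used && (clients[i].event_mask & events))
			clients[i].signalled = true;
}

int main(void)
{
	ntfy_register(0, 0x1);          /* e.g. a "message ready"-style bit */
	ntfy_register(1, 0x2);
	ntfy_notify(0x1);
	printf("client0 %d client1 %d\n",
	       clients[0].signalled, clients[1].signalled);
	return 0;
}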
Example 20
0
/*
 *  ======== WMD_MSG_CreateQueue ========
 *      Create a MSG_QUEUE for sending/receiving messages to/from a node
 *      on the DSP.
 */
DSP_STATUS WMD_MSG_CreateQueue(struct MSG_MGR *hMsgMgr,
			      OUT struct MSG_QUEUE **phMsgQueue,
			      u32 dwId, u32 uMaxMsgs, HANDLE hArg)
{
	u32 i;
	u32 uNumAllocated = 0;
	struct MSG_QUEUE *pMsgQ;
	DSP_STATUS status = DSP_SOK;

	DBC_Require(MEM_IsValidHandle(hMsgMgr, MSGMGR_SIGNATURE));
	DBC_Require(phMsgQueue != NULL);

	*phMsgQueue = NULL;
	/* Allocate MSG_QUEUE object */
	MEM_AllocObject(pMsgQ, struct MSG_QUEUE, MSGQ_SIGNATURE);
	if (!pMsgQ) {
		status = DSP_EMEMORY;
		goto func_end;
	}
	LST_InitElem((struct LST_ELEM *) pMsgQ);
	pMsgQ->uMaxMsgs = uMaxMsgs;
	pMsgQ->hMsgMgr = hMsgMgr;
	pMsgQ->hArg = hArg;	/* Node handle */
	pMsgQ->dwId = dwId;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	pMsgQ->msgFreeList = LST_Create();
	pMsgQ->msgUsedList = LST_Create();
	if (pMsgQ->msgFreeList == NULL || pMsgQ->msgUsedList == NULL)
		status = DSP_EMEMORY;

	 /*  Create event that will be signalled when a message from
	 *  the DSP is available.  */
	if (DSP_SUCCEEDED(status))
		status = SYNC_OpenEvent(&pMsgQ->hSyncEvent, NULL);

	/* Create a notification list for message ready notification. */
	if (DSP_SUCCEEDED(status))
		status = NTFY_Create(&pMsgQ->hNtfy);

	 /*  Create events that will be used to synchronize cleanup
	 *  when the object is deleted. hSyncDone will be set to
	 *  unblock threads in MSG_Put() or MSG_Get(). hSyncDoneAck
	 *  will be set by the unblocked thread to signal that it
	 *  is unblocked and will no longer reference the object.  */
	if (DSP_SUCCEEDED(status))
		status = SYNC_OpenEvent(&pMsgQ->hSyncDone, NULL);

	if (DSP_SUCCEEDED(status))
		status = SYNC_OpenEvent(&pMsgQ->hSyncDoneAck, NULL);

	if (DSP_SUCCEEDED(status)) {
		if (!hMsgMgr->msgFreeList) {
			status = DSP_EHANDLE;
			goto func_end;
		}
		/* Enter critical section */
		(void)SYNC_EnterCS(hMsgMgr->hSyncCS);
		/* Initialize message frames and put in appropriate queues */
		for (i = 0; i < uMaxMsgs && DSP_SUCCEEDED(status); i++) {
			status = AddNewMsg(hMsgMgr->msgFreeList);
			if (DSP_SUCCEEDED(status)) {
				uNumAllocated++;
				status = AddNewMsg(pMsgQ->msgFreeList);
			}
		}
		if (DSP_FAILED(status)) {
			/*  Stay inside CS to prevent others from taking any
			 *  of the newly allocated message frames.  */
			DeleteMsgQueue(pMsgQ, uNumAllocated);
		} else {
			LST_PutTail(hMsgMgr->queueList,
				   (struct LST_ELEM *)pMsgQ);
			*phMsgQueue = pMsgQ;
			/* Signal that free frames are now available */
			if (!LST_IsEmpty(hMsgMgr->msgFreeList))
				SYNC_SetEvent(hMsgMgr->hSyncEvent);

		}
		/* Exit critical section */
		(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
	} else {
		DeleteMsgQueue(pMsgQ, 0);
	}
func_end:
	return status;
}
Example 21
0
/*
 *  ======== WMD_MSG_Get ========
 *      Get a message from a MSG queue.
 */
DSP_STATUS WMD_MSG_Get(struct MSG_QUEUE *hMsgQueue,
		      struct DSP_MSG *pMsg, u32 uTimeout)
{
	struct MSG_FRAME *pMsgFrame;
	struct MSG_MGR *hMsgMgr;
	bool fGotMsg = false;
	struct SYNC_OBJECT *hSyncs[2];
	u32 uIndex;
	DSP_STATUS status = DSP_SOK;

	DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE));
	DBC_Require(pMsg != NULL);

	hMsgMgr = hMsgQueue->hMsgMgr;
	if (!hMsgQueue->msgUsedList) {
		status = DSP_EHANDLE;
		goto func_end;
	}

	/* Enter critical section */
	(void)SYNC_EnterCS(hMsgMgr->hSyncCS);
	/* If a message is already there, get it */
	if (!LST_IsEmpty(hMsgQueue->msgUsedList)) {
		pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgQueue->
			    msgUsedList);
		if (pMsgFrame != NULL) {
			*pMsg = pMsgFrame->msgData.msg;
			LST_PutTail(hMsgQueue->msgFreeList,
				   (struct LST_ELEM *)pMsgFrame);
			if (LST_IsEmpty(hMsgQueue->msgUsedList))
				SYNC_ResetEvent(hMsgQueue->hSyncEvent);
			else {
				NTFY_Notify(hMsgQueue->hNtfy,
				    DSP_NODEMESSAGEREADY);
				SYNC_SetEvent(hMsgQueue->hSyncEvent);
			}

			fGotMsg = true;
		}
	} else {
		if (hMsgQueue->fDone)
			status = DSP_EFAIL;
		else
			hMsgQueue->refCount++;

	}
	/* Exit critical section */
	(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
	if (DSP_SUCCEEDED(status) && !fGotMsg) {
		/*  Wait til message is available, timeout, or done. We don't
		 *  have to schedule the DPC, since the DSP will send messages
		 *  when they are available.  */
		hSyncs[0] = hMsgQueue->hSyncEvent;
		hSyncs[1] = hMsgQueue->hSyncDone;
		status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout,
			 &uIndex);
		/* Enter critical section */
		(void)SYNC_EnterCS(hMsgMgr->hSyncCS);
		if (hMsgQueue->fDone) {
			hMsgQueue->refCount--;
			/* Exit critical section */
			(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
			 /*  Signal that we're not going to access hMsgQueue
			  *  anymore, so it can be deleted.  */
			(void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck);
			status = DSP_EFAIL;
		} else {
			if (DSP_SUCCEEDED(status)) {
				DBC_Assert(!LST_IsEmpty(hMsgQueue->
					  msgUsedList));
				/* Get msg from used list */
				pMsgFrame = (struct MSG_FRAME *)
					   LST_GetHead(hMsgQueue->msgUsedList);
				/* Copy message into pMsg and put frame on the
				 * free list */
				if (pMsgFrame != NULL) {
					*pMsg = pMsgFrame->msgData.msg;
					LST_PutTail(hMsgQueue->msgFreeList,
					(struct LST_ELEM *)pMsgFrame);
				}
			}
			hMsgQueue->refCount--;
			/* Reset the event if there are still queued messages */
			if (!LST_IsEmpty(hMsgQueue->msgUsedList)) {
				NTFY_Notify(hMsgQueue->hNtfy,
					DSP_NODEMESSAGEREADY);
				SYNC_SetEvent(hMsgQueue->hSyncEvent);
			}

			/* Exit critical section */
			(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
		}
	}
func_end:
	return status;
}
Example 22
0
/*
 *  ======== WMD_MSG_Put ========
 *      Put a message onto a MSG queue.
 */
DSP_STATUS WMD_MSG_Put(struct MSG_QUEUE *hMsgQueue,
		      IN CONST struct DSP_MSG *pMsg, u32 uTimeout)
{
	struct MSG_FRAME *pMsgFrame;
	struct MSG_MGR *hMsgMgr;
	bool fPutMsg = false;
	struct SYNC_OBJECT *hSyncs[2];
	u32 uIndex;
	DSP_STATUS status = DSP_SOK;

	DBC_Require(MEM_IsValidHandle(hMsgQueue, MSGQ_SIGNATURE));
	DBC_Require(pMsg != NULL);

	hMsgMgr = hMsgQueue->hMsgMgr;

	if (!hMsgMgr->msgFreeList) {
		status = DSP_EHANDLE;
		goto func_end;
	}


	(void) SYNC_EnterCS(hMsgMgr->hSyncCS);

	/* If a message frame is available, use it */
	if (!LST_IsEmpty(hMsgMgr->msgFreeList)) {
		pMsgFrame = (struct MSG_FRAME *)LST_GetHead(hMsgMgr->
			    msgFreeList);
		if (pMsgFrame != NULL) {
			pMsgFrame->msgData.msg = *pMsg;
			pMsgFrame->msgData.dwId = hMsgQueue->dwId;
			LST_PutTail(hMsgMgr->msgUsedList, (struct LST_ELEM *)
				   pMsgFrame);
			hMsgMgr->uMsgsPending++;
			fPutMsg = true;
		}
		if (LST_IsEmpty(hMsgMgr->msgFreeList))
			SYNC_ResetEvent(hMsgMgr->hSyncEvent);

		/* Release critical section before scheduling DPC */
		(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
		/* Schedule a DPC, to do the actual data transfer: */
		IO_Schedule(hMsgMgr->hIOMgr);
	} else {
		if (hMsgQueue->fDone)
			status = DSP_EFAIL;
		else
			hMsgQueue->refCount++;

		(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
	}
	if (DSP_SUCCEEDED(status) && !fPutMsg) {
		/* Wait til a free message frame is available, timeout,
		 * or done */
		hSyncs[0] = hMsgMgr->hSyncEvent;
		hSyncs[1] = hMsgQueue->hSyncDone;
		status = SYNC_WaitOnMultipleEvents(hSyncs, 2, uTimeout,
			 &uIndex);
		/* Enter critical section */
		(void)SYNC_EnterCS(hMsgMgr->hSyncCS);
		if (hMsgQueue->fDone) {
			hMsgQueue->refCount--;
			/* Exit critical section */
			(void)SYNC_LeaveCS(hMsgMgr->hSyncCS);
			 /*  Signal that we're not going to access hMsgQueue
			  *  anymore, so it can be deleted.  */
			(void)SYNC_SetEvent(hMsgQueue->hSyncDoneAck);
			status = DSP_EFAIL;
		} else {
			if (DSP_SUCCEEDED(status)) {
				if (LST_IsEmpty(hMsgMgr->msgFreeList)) {
					status = DSP_EPOINTER;
					goto func_cont;
				}
				/* Get msg from free list */
				pMsgFrame = (struct MSG_FRAME *)
					    LST_GetHead(hMsgMgr->msgFreeList);
				/* Copy message into pMsg and put frame on the
				 * used list */
				if (pMsgFrame != NULL) {
					pMsgFrame->msgData.msg = *pMsg;
					pMsgFrame->msgData.dwId =
						hMsgQueue->dwId;
					LST_PutTail(hMsgMgr->msgUsedList,
						   (struct LST_ELEM *)
						   pMsgFrame);
					hMsgMgr->uMsgsPending++;
					/* Schedule a DPC, to do the actual
					 * data transfer: */
					IO_Schedule(hMsgMgr->hIOMgr);
				}
			}
			hMsgQueue->refCount--;
			/* Reset event if there are still frames available */
			if (!LST_IsEmpty(hMsgMgr->msgFreeList))
				SYNC_SetEvent(hMsgMgr->hSyncEvent);
func_cont:
			/* Exit critical section */
			(void) SYNC_LeaveCS(hMsgMgr->hSyncCS);
		}
	}
func_end:
	return status;
}
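WMD_MSG_Put and WMD_MSG_Get recycle a fixed pool of message frames between a free list and a used list: Put blocks while no free frame is available, Get blocks while no message is queued, and each side signals the other's event when it makes a frame or a message available. The sketch below reproduces that bounded exchange with POSIX condition variables standing in for the SYNC events; the frame layout and names are illustrative, and the fDone teardown path is omitted (it is the handshake sketched after WMD_MSG_DeleteQueue above).

#include <pthread.h>
#include <stdio.h>

#define NUM_FRAMES	4

/* Toy message frame; the driver keeps struct MSG_FRAME on LST lists. */
struct frame {
	int payload;
};

static struct frame pool[NUM_FRAMES];
static struct frame *free_list[NUM_FRAMES];	/* stack of free frames  */
static struct frame *used_ring[NUM_FRAMES];	/* FIFO of queued frames */
static int n_free, used_head, used_count;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t free_avail = PTHREAD_COND_INITIALIZER; /* cf. hMsgMgr->hSyncEvent   */
static pthread_cond_t msg_avail = PTHREAD_COND_INITIALIZER;  /* cf. hMsgQueue->hSyncEvent */

static void msg_put(int payload)		/* cf. WMD_MSG_Put */
{
	struct frame *f;

	pthread_mutex_lock(&lock);
	while (n_free == 0)			/* wait for a free frame */
		pthread_cond_wait(&free_avail, &lock);
	f = free_list[--n_free];
	f->payload = payload;
	used_ring[(used_head + used_count) % NUM_FRAMES] = f;
	used_count++;
	pthread_cond_signal(&msg_avail);	/* a message is now queued */
	pthread_mutex_unlock(&lock);
}

static int msg_get(void)			/* cf. WMD_MSG_Get */
{
	struct frame *f;
	int payload;

	pthread_mutex_lock(&lock);
	while (used_count == 0)			/* wait for a queued message */
		pthread_cond_wait(&msg_avail, &lock);
	f = used_ring[used_head];
	used_head = (used_head + 1) % NUM_FRAMES;
	used_count--;
	payload = f->payload;
	free_list[n_free++] = f;		/* recycle the frame */
	pthread_cond_signal(&free_avail);	/* a frame is free again */
	pthread_mutex_unlock(&lock);
	return payload;
}

static void *producer(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 10; i++)
		msg_put(i);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	for (i = 0; i < NUM_FRAMES; i++)
		free_list[n_free++] = &pool[i];

	pthread_create(&t, NULL, producer, NULL);
	for (i = 0; i < 10; i++)
		printf("got %d\n", msg_get());
	pthread_join(t, NULL);
	return 0;
}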
Example 23
0
/*
 *  ======== WMD_CHNL_GetIOC ========
 *      Optionally wait for I/O completion on a channel.  Dequeue an I/O
 *      completion record, which contains information about the completed
 *      I/O request.
 *      Note: Ensures Channel Invariant (see notes above).
 */
DSP_STATUS WMD_CHNL_GetIOC(struct CHNL_OBJECT *hChnl, u32 dwTimeOut,
			  OUT struct CHNL_IOC *pIOC)
{
	DSP_STATUS status = DSP_SOK;
	struct CHNL_OBJECT *pChnl = (struct CHNL_OBJECT *)hChnl;
	struct CHNL_IRP *pChirp;
	DSP_STATUS statSync;
	bool fDequeueIOC = true;
	struct CHNL_IOC ioc = { NULL, 0, 0, 0, 0 };
	u8 *pHostSysBuf = NULL;

	DBG_Trace(DBG_ENTER, "> WMD_CHNL_GetIOC pChnl %p CHNL_IsOutput %x "
		 "uChnlType %x\n", pChnl, CHNL_IsOutput(pChnl->uMode),
		 pChnl->uChnlType);
	/* Check args: */
	if (pIOC == NULL) {
		status = DSP_EPOINTER;
	} else if (!MEM_IsValidHandle(pChnl, CHNL_SIGNATURE)) {
		status = DSP_EHANDLE;
	} else if (dwTimeOut == CHNL_IOCNOWAIT) {
		if (LST_IsEmpty(pChnl->pIOCompletions))
			status = CHNL_E_NOIOC;

	}
	if (DSP_FAILED(status))
		goto func_end;

	ioc.status = CHNL_IOCSTATCOMPLETE;
	if (dwTimeOut != CHNL_IOCNOWAIT && LST_IsEmpty(pChnl->pIOCompletions)) {
		if (dwTimeOut == CHNL_IOCINFINITE)
			dwTimeOut = SYNC_INFINITE;

		statSync = SYNC_WaitOnEvent(pChnl->hSyncEvent, dwTimeOut);
		if (statSync == DSP_ETIMEOUT) {
			/* No response from DSP */
			ioc.status |= CHNL_IOCSTATTIMEOUT;
			fDequeueIOC = false;
		} else if (statSync == DSP_EFAIL) {
			/* This can occur when the user mode thread is
			 * aborted (^C), or when _VWIN32_WaitSingleObject()
			 * fails due to unknown causes.  */
			/* Even though Wait failed, there may be something in
			 * the Q: */
			if (LST_IsEmpty(pChnl->pIOCompletions)) {
				ioc.status |= CHNL_IOCSTATCANCEL;
				fDequeueIOC = false;
			}
		}
	}
	/* See comment in AddIOReq */
	SYNC_EnterCS(pChnl->pChnlMgr->hCSObj);
	disable_irq(MAILBOX_IRQ);
	if (fDequeueIOC) {
		/* Dequeue IOC and set pIOC; */
		DBC_Assert(!LST_IsEmpty(pChnl->pIOCompletions));
		pChirp = (struct CHNL_IRP *)LST_GetHead(pChnl->pIOCompletions);
		/* Update pIOC from channel state and chirp: */
		if (pChirp) {
			pChnl->cIOCs--;
			/*  If this is a zero-copy channel, then set IOC's pBuf
			 *  to the DSP's address. This DSP address will get
			 *  translated to user's virtual addr later.  */
			{
				pHostSysBuf = pChirp->pHostSysBuf;
				ioc.pBuf = pChirp->pHostUserBuf;
			}
			ioc.cBytes = pChirp->cBytes;
			ioc.cBufSize = pChirp->cBufSize;
			ioc.dwArg = pChirp->dwArg;
			ioc.status |= pChirp->status;
			/* Place the used chirp on the free list: */
			LST_PutTail(pChnl->pFreeList, (struct LST_ELEM *)
				   pChirp);
		} else {
			ioc.pBuf = NULL;
			ioc.cBytes = 0;
		}
	} else {
		ioc.pBuf = NULL;
		ioc.cBytes = 0;
		ioc.dwArg = 0;
		ioc.cBufSize = 0;
	}
	/* Ensure invariant: If any IOC's are queued for this channel... */
	if (!LST_IsEmpty(pChnl->pIOCompletions)) {
		/*  Since DSPStream_Reclaim() does not take a timeout
		 *  parameter, we pass the stream's timeout value to
		 *  WMD_CHNL_GetIOC. We cannot determine whether or not
		 *  we have waited in User mode. Since the stream's timeout
		 *  value may be non-zero, we still have to set the event.
		 *  Therefore, this optimization is taken out.
		 *
		 *  if (dwTimeOut == CHNL_IOCNOWAIT) {
		 *    ... ensure event is set..
		 *      SYNC_SetEvent(pChnl->hSyncEvent);
		 *  } */
		SYNC_SetEvent(pChnl->hSyncEvent);
	} else {
		/* else, if list is empty, ensure event is reset. */
		SYNC_ResetEvent(pChnl->hSyncEvent);
	}
	enable_irq(MAILBOX_IRQ);
	SYNC_LeaveCS(pChnl->pChnlMgr->hCSObj);
	if (fDequeueIOC && (pChnl->uChnlType == CHNL_PCPY && pChnl->uId > 1)) {
		if (!(ioc.pBuf < (void *) USERMODE_ADDR))
			goto func_cont;

		/* If the addr is in user mode, then copy it */
		if (!pHostSysBuf || !ioc.pBuf) {
			status = DSP_EPOINTER;
			DBG_Trace(DBG_LEVEL7,
				 "System buffer NULL in IO completion.\n");
			goto func_cont;
		}
		if (!CHNL_IsInput(pChnl->uMode))
			goto func_cont1;

		/*pHostUserBuf */
		status = copy_to_user(ioc.pBuf, pHostSysBuf, ioc.cBytes);
#ifndef RES_CLEANUP_DISABLE
		if (status) {
			if (current->flags & PF_EXITING) {
				DBG_Trace(DBG_LEVEL7,
					 "\n2current->flags ==  PF_EXITING, "
					 " current->flags;0x%x\n",
					 current->flags);
				status = 0;
			} else {
				DBG_Trace(DBG_LEVEL7,
					 "\n2current->flags != PF_EXITING, "
					 " current->flags;0x%x\n",
					 current->flags);
			}
		}
#endif
		if (status) {
			DBG_Trace(DBG_LEVEL7,
				 "Error copying kernel buffer to user, %d"
				 " bytes remaining.  in_interupt %d\n",
				 status, in_interrupt());
			status = DSP_EPOINTER;
		}
func_cont1:
		MEM_Free(pHostSysBuf);
	}
func_cont:
	/* Update User's IOC block: */
	*pIOC = ioc;
func_end:
	DBG_Trace(DBG_ENTER, "< WMD_CHNL_GetIOC pChnl %p\n", pChnl);
	return status;
}